// SPDX-License-Identifier: GPL-2.0
/*
 * r2300.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/cacheops.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
26 /* For R3000 cores with R4000 style caches */
27 static unsigned long icache_size, dcache_size; /* Size in bytes */
29 #include <asm/r4kcache.h>
/*
 * This sequence is required to ensure icache is disabled immediately.
 * The taken branch + nop forces the pipeline to refetch, so no
 * already-streamed instructions execute after ICE is cleared.
 */
#define TX39_STOP_STREAMING() \
__asm__ __volatile__( \
	".set    push\n\t" \
	".set    noreorder\n\t" \
	"b       1f\n\t" \
	"nop\n\t" \
	"1:\n\t" \
	".set pop" \
	)
42 /* TX39H-style cache flush routines. */
43 static void tx39h_flush_icache_all(void)
45 unsigned long flags, config;
47 /* disable icache (set ICE#) */
48 local_irq_save(flags);
49 config = read_c0_conf();
50 write_c0_conf(config & ~TX39_CONF_ICE);
51 TX39_STOP_STREAMING();
53 write_c0_conf(config);
54 local_irq_restore(flags);
/*
 * DMA coherence for the write-through TX39/H core: no writeback is
 * needed, only invalidation of the affected range.
 */
static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	iob();				/* order prior uncached I/O writes */
	blast_inv_dcache_range(addr, addr + size);
}
68 static inline void tx39_blast_dcache_page(unsigned long addr)
70 if (current_cpu_type() != CPU_TX3912)
71 blast_dcache16_page(addr);
/* Indexed flush of one page worth of dcache lines (no TLB lookup). */
static inline void tx39_blast_dcache_page_indexed(unsigned long addr)
{
	blast_dcache16_page_indexed(addr);
}
79 static inline void tx39_blast_dcache(void)
84 static inline void tx39_blast_icache_page(unsigned long addr)
86 unsigned long flags, config;
87 /* disable icache (set ICE#) */
88 local_irq_save(flags);
89 config = read_c0_conf();
90 write_c0_conf(config & ~TX39_CONF_ICE);
91 TX39_STOP_STREAMING();
92 blast_icache16_page(addr);
93 write_c0_conf(config);
94 local_irq_restore(flags);
97 static inline void tx39_blast_icache_page_indexed(unsigned long addr)
99 unsigned long flags, config;
100 /* disable icache (set ICE#) */
101 local_irq_save(flags);
102 config = read_c0_conf();
103 write_c0_conf(config & ~TX39_CONF_ICE);
104 TX39_STOP_STREAMING();
105 blast_icache16_page_indexed(addr);
106 write_c0_conf(config);
107 local_irq_restore(flags);
110 static inline void tx39_blast_icache(void)
112 unsigned long flags, config;
113 /* disable icache (set ICE#) */
114 local_irq_save(flags);
115 config = read_c0_conf();
116 write_c0_conf(config & ~TX39_CONF_ICE);
117 TX39_STOP_STREAMING();
119 write_c0_conf(config);
120 local_irq_restore(flags);
123 static void tx39__flush_cache_vmap(void)
128 static void tx39__flush_cache_vunmap(void)
133 static inline void tx39_flush_cache_all(void)
135 if (!cpu_has_dc_aliases)
141 static inline void tx39___flush_cache_all(void)
147 static void tx39_flush_cache_mm(struct mm_struct *mm)
149 if (!cpu_has_dc_aliases)
152 if (cpu_context(smp_processor_id(), mm) != 0)
156 static void tx39_flush_cache_range(struct vm_area_struct *vma,
157 unsigned long start, unsigned long end)
159 if (!cpu_has_dc_aliases)
161 if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
167 static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
169 int exec = vma->vm_flags & VM_EXEC;
170 struct mm_struct *mm = vma->vm_mm;
175 * If ownes no valid ASID yet, cannot possibly have gotten
176 * this page into the cache.
178 if (cpu_context(smp_processor_id(), mm) == 0)
182 pmdp = pmd_off(mm, page);
183 ptep = pte_offset_kernel(pmdp, page);
186 * If the page isn't marked valid, the page cannot possibly be
189 if (!(pte_val(*ptep) & _PAGE_PRESENT))
193 * Doing flushes for another ASID than the current one is
194 * too difficult since stupid R4k caches do a TLB translation
195 * for every cache flush operation. So we do indexed flushes
196 * in that case, which doesn't overly flush the cache too much.
198 if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
199 if (cpu_has_dc_aliases || exec)
200 tx39_blast_dcache_page(page);
202 tx39_blast_icache_page(page);
208 * Do indexed flush, too much work to get the (possible) TLB refills
211 if (cpu_has_dc_aliases || exec)
212 tx39_blast_dcache_page_indexed(page);
214 tx39_blast_icache_page_indexed(page);
/* Per-CPU variant: flush one dcache page given a kernel pointer. */
static void local_tx39_flush_data_cache_page(void *addr)
{
	tx39_blast_dcache_page((unsigned long)addr);
}
/* Flush one dcache page by virtual address. */
static void tx39_flush_data_cache_page(unsigned long addr)
{
	tx39_blast_dcache_page(addr);
}
227 static void tx39_flush_icache_range(unsigned long start, unsigned long end)
229 if (end - start > dcache_size)
232 protected_blast_dcache_range(start, end);
234 if (end - start > icache_size)
237 unsigned long flags, config;
238 /* disable icache (set ICE#) */
239 local_irq_save(flags);
240 config = read_c0_conf();
241 write_c0_conf(config & ~TX39_CONF_ICE);
242 TX39_STOP_STREAMING();
243 protected_blast_icache_range(start, end);
244 write_c0_conf(config);
245 local_irq_restore(flags);
249 static void tx39_flush_kernel_vmap_range(unsigned long vaddr, int size)
254 static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
258 if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
261 tx39_blast_dcache_page(addr);
263 } while(addr != end);
264 } else if (size > dcache_size) {
267 blast_dcache_range(addr, addr + size);
271 static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
275 if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
278 tx39_blast_dcache_page(addr);
280 } while(addr != end);
281 } else if (size > dcache_size) {
284 blast_inv_dcache_range(addr, addr + size);
288 static __init void tx39_probe_cache(void)
290 unsigned long config;
292 config = read_c0_conf();
294 icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >>
295 TX39_CONF_ICS_SHIFT));
296 dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >>
297 TX39_CONF_DCS_SHIFT));
299 current_cpu_data.icache.linesz = 16;
300 switch (current_cpu_type()) {
302 current_cpu_data.icache.ways = 1;
303 current_cpu_data.dcache.ways = 1;
304 current_cpu_data.dcache.linesz = 4;
308 current_cpu_data.icache.ways = 2;
309 current_cpu_data.dcache.ways = 2;
310 current_cpu_data.dcache.linesz = 16;
315 current_cpu_data.icache.ways = 1;
316 current_cpu_data.dcache.ways = 1;
317 current_cpu_data.dcache.linesz = 16;
322 void tx39_cache_init(void)
324 extern void build_clear_page(void);
325 extern void build_copy_page(void);
326 unsigned long config;
328 config = read_c0_conf();
329 config &= ~TX39_CONF_WBON;
330 write_c0_conf(config);
334 switch (current_cpu_type()) {
336 /* TX39/H core (writethru direct-map cache) */
337 __flush_cache_vmap = tx39__flush_cache_vmap;
338 __flush_cache_vunmap = tx39__flush_cache_vunmap;
339 flush_cache_all = tx39h_flush_icache_all;
340 __flush_cache_all = tx39h_flush_icache_all;
341 flush_cache_mm = (void *) tx39h_flush_icache_all;
342 flush_cache_range = (void *) tx39h_flush_icache_all;
343 flush_cache_page = (void *) tx39h_flush_icache_all;
344 flush_icache_range = (void *) tx39h_flush_icache_all;
345 local_flush_icache_range = (void *) tx39h_flush_icache_all;
347 local_flush_data_cache_page = (void *) tx39h_flush_icache_all;
348 flush_data_cache_page = (void *) tx39h_flush_icache_all;
350 _dma_cache_wback_inv = tx39h_dma_cache_wback_inv;
352 shm_align_mask = PAGE_SIZE - 1;
359 /* TX39/H2,H3 core (writeback 2way-set-associative cache) */
360 /* board-dependent init code may set WBON */
362 __flush_cache_vmap = tx39__flush_cache_vmap;
363 __flush_cache_vunmap = tx39__flush_cache_vunmap;
365 flush_cache_all = tx39_flush_cache_all;
366 __flush_cache_all = tx39___flush_cache_all;
367 flush_cache_mm = tx39_flush_cache_mm;
368 flush_cache_range = tx39_flush_cache_range;
369 flush_cache_page = tx39_flush_cache_page;
370 flush_icache_range = tx39_flush_icache_range;
371 local_flush_icache_range = tx39_flush_icache_range;
373 __flush_kernel_vmap_range = tx39_flush_kernel_vmap_range;
375 local_flush_data_cache_page = local_tx39_flush_data_cache_page;
376 flush_data_cache_page = tx39_flush_data_cache_page;
378 _dma_cache_wback_inv = tx39_dma_cache_wback_inv;
379 _dma_cache_wback = tx39_dma_cache_wback_inv;
380 _dma_cache_inv = tx39_dma_cache_inv;
382 shm_align_mask = max_t(unsigned long,
383 (dcache_size / current_cpu_data.dcache.ways) - 1,
389 __flush_icache_user_range = flush_icache_range;
390 __local_flush_icache_user_range = local_flush_icache_range;
392 current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways;
393 current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways;
395 current_cpu_data.icache.sets =
396 current_cpu_data.icache.waysize / current_cpu_data.icache.linesz;
397 current_cpu_data.dcache.sets =
398 current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz;
400 if (current_cpu_data.dcache.waysize > PAGE_SIZE)
401 current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES;
403 current_cpu_data.icache.waybit = 0;
404 current_cpu_data.dcache.waybit = 0;
406 pr_info("Primary instruction cache %ldkB, linesize %d bytes\n",
407 icache_size >> 10, current_cpu_data.icache.linesz);
408 pr_info("Primary data cache %ldkB, linesize %d bytes\n",
409 dcache_size >> 10, current_cpu_data.dcache.linesz);
413 tx39h_flush_icache_all();