/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

//#define printd(x...) printk(x)
#define printd(x...) do { } while (0)

/*
 * Note:
 * The kernel provides one architecture bit PG_arch_1 in the page flags that
 * can be used for cache coherency.
 *
 * I$-D$ coherency.
 *
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate if the caches
 * are coherent. The kernel clears this bit whenever a page is added to the
 * page cache. At that time, the caches might not be in sync. We, therefore,
 * define this flag as 'clean' if set.
 *
 * D-cache aliasing.
 *
 * With cache aliasing, we always have to flush the cache when pages are
 * unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
 * cache.
 */
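
/*
 * For orientation: the alias helpers used throughout this file come from
 * <asm/page.h>. A minimal sketch of their usual Xtensa definitions (the
 * tree's <asm/page.h> is authoritative):
 *
 *      #define DCACHE_ALIAS_MASK       (PAGE_MASK & (DCACHE_WAY_SIZE - 1))
 *      #define DCACHE_ALIAS_EQ(a, b)   ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
 *
 * Two addresses whose alias bits match fall into the same cache color, so
 * a kernel/user mapping pair needs extra flushing only when the bits differ.
 */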

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

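/*
 * Invalidate any D-cache lines that the kernel's own mapping of @page may
 * hold when that mapping has a different cache color than the user address
 * @vaddr. Highmem pages have no permanent kernel mapping, so they are
 * invalidated through a TLBTEMP window colored to the page's physical
 * address instead.
 */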
static inline void kmap_invalidate_coherent(struct page *page,
                                            unsigned long vaddr)
{
        if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
                unsigned long kvaddr;

                if (!PageHighMem(page)) {
                        kvaddr = (unsigned long)page_to_virt(page);

                        __invalidate_dcache_page(kvaddr);
                } else {
                        kvaddr = TLBTEMP_BASE_1 +
                                (page_to_phys(page) & DCACHE_ALIAS_MASK);

                        __invalidate_dcache_page_alias(kvaddr,
                                                       page_to_phys(page));
                }
        }
}
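
/*
 * Return a kernel virtual address for touching @page that shares a cache
 * color with the user mapping at @vaddr: the page's regular lowmem address
 * when the colors already match, otherwise a slot in the TLBTEMP window
 * (*paddr is then set so the caller can install the temporary mapping).
 */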
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
                                    unsigned long vaddr, unsigned long *paddr)
{
        if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
                *paddr = page_to_phys(page);
                return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
        } else {
                *paddr = 0;
                return page_to_virt(page);
        }
}

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        unsigned long paddr;
        void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);

        preempt_disable();
        kmap_invalidate_coherent(page, vaddr);
        set_bit(PG_arch_1, &page->flags);
        clear_page_alias(kvaddr, paddr);
        preempt_enable();
}
EXPORT_SYMBOL(clear_user_highpage);
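
/*
 * Like clear_user_highpage() above, but the source page maps through the
 * separate TLBTEMP_BASE_2 window so it cannot collide with the destination's
 * TLBTEMP_BASE_1 slot while both temporary mappings are live.
 */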
void copy_user_highpage(struct page *dst, struct page *src,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        unsigned long dst_paddr, src_paddr;
        void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
                                          &dst_paddr);
        void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
                                          &src_paddr);

        preempt_disable();
        kmap_invalidate_coherent(dst, vaddr);
        set_bit(PG_arch_1, &dst->flags);
        copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
        preempt_enable();
}
EXPORT_SYMBOL(copy_user_highpage);

/*
 * This routine is called any time the kernel writes to, or is about to
 * read from, a page in the page cache.
 */

void flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        /*
         * If we have a mapping but the page is not mapped to user-space
         * yet, we simply mark this page dirty and defer flushing the
         * caches until update_mmu_cache().
         */

        if (mapping && !mapping_mapped(mapping)) {
                if (!test_bit(PG_arch_1, &page->flags))
                        set_bit(PG_arch_1, &page->flags);
                return;

        } else {

                unsigned long phys = page_to_phys(page);
                unsigned long temp = page->index << PAGE_SHIFT;
                unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
                unsigned long virt;

                /*
                 * Flush the page in kernel space and user space.
                 * Note that we can omit that step if aliasing is not
                 * an issue, but we do have to synchronize I$ and D$
                 * if we have a mapping.
                 */

                if (!alias && !mapping)
                        return;

                virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
                __flush_invalidate_dcache_page_alias(virt, phys);

                virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

                if (alias)
                        __flush_invalidate_dcache_page_alias(virt, phys);

                if (mapping)
                        __invalidate_icache_page_alias(virt, phys);
        }

        /* There shouldn't be an entry in the cache for this page anymore. */
}
EXPORT_SYMBOL(flush_dcache_page);
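
/*
 * Note: the deferred path above pairs with update_mmu_cache() below; pages
 * that were only marked PG_arch_1 here get their caches flushed once they
 * are actually mapped into user space.
 */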

/*
 * For now, flush the whole cache. FIXME??
 */

void local_flush_cache_range(struct vm_area_struct *vma,
                             unsigned long start, unsigned long end)
{
        __flush_invalidate_dcache_all();
        __invalidate_icache_all();
}
EXPORT_SYMBOL(local_flush_cache_range);
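
/*
 * The local_ prefix marks these as single-CPU primitives; on SMP the
 * corresponding flush_cache_*() entry points invoke them on each CPU.
 */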

/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */

void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
                            unsigned long pfn)
{
        /* Note that we have to use the 'alias' address to avoid multi-hit. */

        unsigned long phys = page_to_phys(pfn_to_page(pfn));
        unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

        __flush_invalidate_dcache_page_alias(virt, phys);
        __invalidate_icache_page_alias(virt, phys);
}
EXPORT_SYMBOL(local_flush_cache_page);

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
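
/*
 * Called after a PTE for @addr has been installed: drop the stale TLB entry
 * and resolve any flush that flush_dcache_page() deferred via PG_arch_1
 * (or, on configurations without D-cache aliasing, sync I$ and D$ for
 * executable mappings).
 */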
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        unsigned long pfn = pte_pfn(*ptep);
        struct page *page;

        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);

        /* Invalidate old entry in TLBs */

        flush_tlb_page(vma, addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

        if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
                unsigned long phys = page_to_phys(page);
                unsigned long tmp;

                tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
                __flush_invalidate_dcache_page_alias(tmp, phys);
                tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
                __flush_invalidate_dcache_page_alias(tmp, phys);
                __invalidate_icache_page_alias(tmp, phys);

                clear_bit(PG_arch_1, &page->flags);
        }
#else
        if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
            && (vma->vm_flags & VM_EXEC) != 0) {
                unsigned long paddr = (unsigned long)kmap_atomic(page);

                __flush_dcache_page(paddr);
                __invalidate_icache_page(paddr);
                set_bit(PG_arch_1, &page->flags);
                kunmap_atomic((void *)paddr);
        }
#endif
}

/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
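
/*
 * copy_to_user_page(): write @len bytes of @src into a user page through the
 * kernel mapping @dst (e.g. for ptrace pokes), keeping the user's D-cache
 * view and, for executable mappings, the I-cache consistent.
 */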
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        unsigned long phys = page_to_phys(page);
        unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

        /* Flush and invalidate user page if aliased. */

        if (alias) {
                unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

                __flush_invalidate_dcache_page_alias(t, phys);
        }

        /* Copy data. */

        memcpy(dst, src, len);

        /*
         * Flush and invalidate kernel page if aliased and synchronize
         * data and instruction caches for executable pages.
         */

        if (alias) {
                unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

                __flush_invalidate_dcache_range((unsigned long)dst, len);
                if ((vma->vm_flags & VM_EXEC) != 0)
                        __invalidate_icache_page_alias(t, phys);

        } else if ((vma->vm_flags & VM_EXEC) != 0) {
                __flush_dcache_range((unsigned long)dst, len);
                __invalidate_icache_range((unsigned long)dst, len);
        }
}
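
/*
 * copy_from_user_page(): the read-side counterpart; only the user page needs
 * flushing before the copy, and nothing becomes executable, so there is no
 * I-cache work afterwards.
 */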
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        unsigned long phys = page_to_phys(page);
        unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

        /*
         * Flush user page if aliased.
         * (Note: a simple flush would be sufficient.)
         */

        if (alias) {
                unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

                __flush_invalidate_dcache_page_alias(t, phys);
        }

        memcpy(dst, src, len);
}

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */