// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

#include "mm.h"

#if SHMLBA > 16384
#error FIX ME
#endif

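/*
 * A single set of per-colour copy/clear windows (COPYPAGE_V6_FROM and
 * COPYPAGE_V6_TO, provided by the local mm.h) is shared by the aliasing
 * paths below; this lock serialises their use across CPUs.
 */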
static DEFINE_RAW_SPINLOCK(v6_lock);

/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
        struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
        void *kto, *kfrom;

        kfrom = kmap_atomic(from);
        kto = kmap_atomic(to);
        copy_page(kto, kfrom);
        kunmap_atomic(kto);
        kunmap_atomic(kfrom);
}

/*
 * Clear the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of this page.
 */
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page);

        clear_page(kaddr);
        kunmap_atomic(kaddr);
}
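
/*
 * Note: the non-aliasing paths above use kmap_atomic() and so cope with
 * highmem pages; the aliasing paths below go through page_address() and
 * do not (see the FIXMEs).
 */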

/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: needs this MCRR to be supported.
 */
static void discard_old_kernel_data(void *kto)
{
        /* invalidate the D-cache lines covering kto .. kto + PAGE_SIZE - 1 */
        __asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
           :
           : "r" (kto),
             "r" ((unsigned long)kto + PAGE_SIZE - 1)
           : "cc");
}
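
/*
 * On a VIPT aliasing D-cache the same physical page can live in more than
 * one cache "colour", selected by virtual address bits above PAGE_SHIFT.
 * Copying or clearing through the kernel's own mapping could therefore
 * leave the user's mapping looking at stale lines, so the aliasing
 * variants below remap the pages at a kernel address with the same colour
 * as the user-space address (vaddr) before touching them.
 */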

/*
 * Copy the page, taking account of the cache colour.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
        struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
        unsigned int offset = CACHE_COLOUR(vaddr);
        unsigned long kfrom, kto;
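
        /*
         * offset is the cache colour of the user address: it selects the
         * COPYPAGE_V6_FROM/TO windows whose virtual index matches vaddr,
         * so the temporary kernel aliases below hit the same cache lines
         * as the user mapping.
         */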

        if (!test_and_set_bit(PG_dcache_clean, &from->flags))
                __flush_dcache_page(page_mapping_file(from), from);

        /* FIXME: not highmem safe */
        discard_old_kernel_data(page_address(to));

        /*
         * Now copy the page using the same cache colour as the
         * pages ultimate destination.
         */
        raw_spin_lock(&v6_lock);

        kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
        kto   = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);

        /* map the source and destination pages at their coloured windows */
        set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
        set_top_pte(kto, mk_pte(to, PAGE_KERNEL));

        copy_page((void *)kto, (void *)kfrom);

        raw_spin_unlock(&v6_lock);
}

/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the
 * user page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
        unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);

        /* FIXME: not highmem safe */
        discard_old_kernel_data(page_address(page));

        /*
         * Now clear the page using the same cache colour as
         * the pages ultimate destination.
         */
        raw_spin_lock(&v6_lock);

        set_top_pte(to, mk_pte(page, PAGE_KERNEL));
        clear_page((void *)to);

        raw_spin_unlock(&v6_lock);
}

struct cpu_user_fns v6_user_fns __initdata = {
        .cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
        .cpu_copy_user_highpage	= v6_copy_user_highpage_nonaliasing,
};

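/*
 * The non-aliasing routines are the default; switch to the aliasing
 * variants at boot if the D-cache turns out to be a VIPT aliasing one.
 */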
static int __init v6_userpage_init(void)
{
        if (cache_is_vipt_aliasing()) {
                cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
                cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
        }

        return 0;
}

core_initcall(v6_userpage_init);