// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
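
/*
 * Zero Userspace
 */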
unsigned long __clear_user(void __user *addr, unsigned long size)
{
        long __d0;

        might_fault();
        /* no memory constraint because it doesn't change any memory gcc knows
           about */
        stac();
        asm volatile(
                "       testq  %[size8],%[size8]\n"
                "       jz     4f\n"
                "       .align 16\n"
                "0:     movq $0,(%[dst])\n"
                "       addq   $8,%[dst]\n"
                "       decl %%ecx ; jnz   0b\n"
                "4:     movq  %[size1],%%rcx\n"
                "       testl %%ecx,%%ecx\n"
                "       jz     2f\n"
                "1:     movb   $0,(%[dst])\n"
                "       incq   %[dst]\n"
                "       decl %%ecx ; jnz  1b\n"
                "2:\n"
                _ASM_EXTABLE_TYPE_REG(0b, 2b, EX_TYPE_UCOPY_LEN8, %[size1])
                _ASM_EXTABLE_UA(1b, 2b)
                : [size8] "=&c"(size), [dst] "=&D" (__d0)
                : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
        clac();
        return size;
}
EXPORT_SYMBOL(__clear_user);
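
/**
 * clear_user - Zero a block of memory in user space.
 * @to:  Destination address, in user space.
 * @n:   Number of bytes to zero.
 *
 * Zero a block of memory in user space after checking the range with
 * access_ok(); if the check fails, the whole length @n is reported as
 * not cleared.
 *
 * Return: number of bytes that could not be cleared.
 * On success, this will be zero.
 */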
unsigned long clear_user(void __user *to, unsigned long n)
{
        if (access_ok(to, n))
                return __clear_user(to, n);
        return n;
}
EXPORT_SYMBOL(clear_user);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
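/*
 * Illustrative example: with a 64-byte cache line, a call with
 * addr == 0x1038 and size == 8 touches only the line starting at
 * 0x1000, so a single CLWB is issued.
 */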
static void clean_cache_range(void *addr, size_t size)
{
        u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
        unsigned long clflush_mask = x86_clflush_size - 1;
        void *vend = addr + size;
        void *p;

        for (p = (void *)((unsigned long)addr & ~clflush_mask);
             p < vend; p += x86_clflush_size)
                clwb(p);
}
void arch_wb_cache_pmem(void *addr, size_t size)
{
        clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
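
/*
 * Copy @size bytes from user @src to kernel @dst with
 * __copy_user_nocache() and make sure any part of the destination
 * that may still be dirty in the CPU cache is written back (needed
 * e.g. for persistent memory). The return value of
 * __copy_user_nocache() is passed through unchanged.
 */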
long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
        unsigned long flushed, dest = (unsigned long) dst;
        long rc = __copy_user_nocache(dst, src, size, 0);

        /*
         * __copy_user_nocache() uses non-temporal stores for the bulk
         * of the transfer, but we need to manually flush if the
         * transfer is unaligned. A cached memory copy is used when
         * destination or size is not naturally aligned. That is:
         *   - Require 8-byte alignment when size is 8 bytes or larger.
         *   - Require 4-byte alignment when size is 4 bytes.
         */
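        /*
         * Illustrative example: a 10-byte copy to an 8-byte aligned
         * destination streams the first 8 bytes with non-temporal
         * stores, but the 2-byte tail is written through the cache;
         * the clean of the last destination byte below writes that
         * cache line back.
         */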
        if (size < 8) {
                if (!IS_ALIGNED(dest, 4) || size != 4)
                        clean_cache_range(dst, size);
        } else {
                if (!IS_ALIGNED(dest, 8)) {
                        dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
                        clean_cache_range(dst, 1);
                }

                flushed = dest - (unsigned long) dst;
                if (size > flushed && !IS_ALIGNED(size - flushed, 8))
                        clean_cache_range(dst + size - 1, 1);
        }

        return rc;
}
void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
        unsigned long dest = (unsigned long) _dst;
        unsigned long source = (unsigned long) _src;

        /* cache copy and flush to align dest */
        if (!IS_ALIGNED(dest, 8)) {
                size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);

                memcpy((void *) dest, (void *) source, len);
                clean_cache_range((void *) dest, len);
                dest += len;
                source += len;
                size -= len;
                if (!size)
                        return;
        }
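
        /*
         * The main body below is copied with MOVNTI (non-temporal)
         * stores, which do not leave dirty lines in the cache, so no
         * explicit CLWB is needed for these ranges; only the cached
         * head copy above and the tail copy at the end are flushed.
         */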
        /* 4x8 movnti loop */
        while (size >= 32) {
                asm("movq    (%0), %%r8\n"
                    "movq   8(%0), %%r9\n"
                    "movq  16(%0), %%r10\n"
                    "movq  24(%0), %%r11\n"
                    "movnti  %%r8,   (%1)\n"
                    "movnti  %%r9,  8(%1)\n"
                    "movnti %%r10, 16(%1)\n"
                    "movnti %%r11, 24(%1)\n"
                    :: "r" (source), "r" (dest)
                    : "memory", "r8", "r9", "r10", "r11");
                dest += 32;
                source += 32;
                size -= 32;
        }
        /* 1x8 movnti loop */
        while (size >= 8) {
                asm("movq    (%0), %%r8\n"
                    "movnti  %%r8,   (%1)\n"
                    :: "r" (source), "r" (dest)
                    : "memory", "r8");
                dest += 8;
                source += 8;
                size -= 8;
        }
        /* 1x4 movnti loop */
        while (size >= 4) {
                asm("movl    (%0), %%r8d\n"
                    "movnti  %%r8d,   (%1)\n"
                    :: "r" (source), "r" (dest)
                    : "memory", "r8");
                dest += 4;
                source += 4;
                size -= 4;
        }
        /* cache copy for remaining bytes */
        if (size) {
                memcpy((void *) dest, (void *) source, size);
                clean_cache_range((void *) dest, size);
        }
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);
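
/*
 * Map @page with kmap_atomic() and copy @len bytes starting at
 * @offset in that page to @to using memcpy_flushcache().
 */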
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
                size_t len)
{
        char *from = kmap_atomic(page);

        memcpy_flushcache(to, from + offset, len);
        kunmap_atomic(from);
}
#endif