// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <linux/io.h>
#include <asm/asm-extable.h>
#include <asm/abs_lowcore.h>
#include <asm/stacktrace.h>
#include <asm/maccess.h>
#include <asm/ctlreg.h>

unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
static DEFINE_MUTEX(memcpy_real_mutex);

static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	size = min(8UL - offset, size);
	count = size - 1;
	/*
	 * Copy the aligned doubleword at the destination into tmp, overlay
	 * the requested bytes from src via the executed mvc, then store the
	 * merged doubleword back with sturg using the real address.
	 */
	asm volatile(
		"	bras	1,0f\n"
		"	mvc	0(1,%4),0(%5)\n"
		"0:	mvc	0(8,%3),0(%0)\n"
		"	ex	%1,0(1)\n"
		"	lg	%1,0(%3)\n"
		"	lra	%0,0(%0)\n"
		"	sturg	%1,%0\n"
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from destination at an eight byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 */
static DEFINE_SPINLOCK(s390_kernel_write_lock);

notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
{
	void *tmp = dst;
	unsigned long flags;
	long copied;

	spin_lock_irqsave(&s390_kernel_write_lock, flags);
	while (size) {
		copied = s390_kernel_write_odd(tmp, src, size);
		tmp += copied;
		src += copied;
		size -= copied;
	}
	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);

	return dst;
}

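/*
 * Usage sketch, not part of the original file: roughly how code patching
 * callers (e.g. jump labels or ftrace on s390) use s390_kernel_write() to
 * modify write-protected kernel text. The function name and the patched
 * opcode below are made up for illustration.
 */
static void __maybe_unused s390_kernel_write_example(void *insn)
{
	/* "bcr 0,0" - a two byte s390 nop */
	static const unsigned char nop[2] = { 0x07, 0x00 };

	/* Returns the destination; the store bypasses DAT write protection. */
	s390_kernel_write(insn, nop, sizeof(nop));
}
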
size_t memcpy_real_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
	size_t len, copied, res = 0;
	unsigned long phys, offset;
	void *chunk;
	pte_t pte;

	BUILD_BUG_ON(MEMCPY_REAL_SIZE != PAGE_SIZE);
	while (count) {
		phys = src & MEMCPY_REAL_MASK;
		offset = src & ~MEMCPY_REAL_MASK;
		chunk = (void *)(__memcpy_real_area + offset);
		len = min(count, MEMCPY_REAL_SIZE - offset);
		pte = mk_pte_phys(phys, PAGE_KERNEL_RO);

		mutex_lock(&memcpy_real_mutex);
		if (pte_val(pte) != pte_val(*memcpy_real_ptep)) {
			__ptep_ipte(__memcpy_real_area, memcpy_real_ptep, 0, 0, IPTE_GLOBAL);
			set_pte(memcpy_real_ptep, pte);
		}
		copied = copy_to_iter(chunk, len, iter);
		mutex_unlock(&memcpy_real_mutex);

		count -= copied;
		src += copied;
		res += copied;
		if (copied < len)
			break;
	}
	return res;
}

int memcpy_real(void *dest, unsigned long src, size_t count)
{
	struct iov_iter iter;
	struct kvec kvec;

	kvec.iov_base = dest;
	kvec.iov_len = count;
	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
	if (memcpy_real_iter(&iter, src, count) < count)
		return -EFAULT;
	return 0;
}

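/*
 * Usage sketch, not part of the original file: memcpy_real() reads memory
 * by its real (absolute) address, e.g. for dump or lowcore inspection.
 * The source address used here is made up for illustration.
 */
static int __maybe_unused memcpy_real_example(void)
{
	unsigned char buf[64];

	/* Copy 64 bytes from real address 0x1000 into the local buffer. */
	if (memcpy_real(buf, 0x1000UL, sizeof(buf)))
		return -EFAULT;
	return 0;
}
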
/*
 * Find CPU that owns swapped prefix page
 */
static int get_swapped_owner(phys_addr_t addr)
{
	phys_addr_t lc;
	int cpu;

	for_each_online_cpu(cpu) {
		lc = virt_to_phys(lowcore_ptr[cpu]);
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return cpu;
	}
	return -1;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer is at most one page large.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *ptr = phys_to_virt(addr);
	void *bounce = ptr;
	struct lowcore *abs_lc;
	unsigned long size;
	int this_cpu, cpu;

	cpus_read_lock();
	this_cpu = get_cpu();
	if (addr >= sizeof(struct lowcore)) {
		cpu = get_swapped_owner(addr);
		if (cpu < 0)
			goto out;
	}
	bounce = (void *)__get_free_page(GFP_ATOMIC);
	if (!bounce)
		goto out;
	size = PAGE_SIZE - (addr & ~PAGE_MASK);
	if (addr < sizeof(struct lowcore)) {
		abs_lc = get_abs_lowcore();
		ptr = (void *)abs_lc + addr;
		memcpy(bounce, ptr, size);
		put_abs_lowcore(abs_lc);
	} else if (cpu == this_cpu) {
		ptr = (void *)(addr - virt_to_phys(lowcore_ptr[cpu]));
		memcpy(bounce, ptr, size);
	} else {
		memcpy(bounce, ptr, size);
	}
out:
	put_cpu();
	cpus_read_unlock();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *ptr)
{
	if (addr != virt_to_phys(ptr))
		free_page((unsigned long)ptr);
}

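/*
 * Usage sketch, not part of the original file: how a /dev/mem style read
 * path pairs the two helpers above. xlate_dev_mem_ptr() may hand back
 * either the directly mapped page or a bounce copy of a swapped prefix
 * page; unxlate_dev_mem_ptr() frees the bounce buffer only in the latter
 * case. The helper name is made up and @count is assumed to stay within
 * the translated page.
 */
static int __maybe_unused dev_mem_copy_example(phys_addr_t addr, void *dst, size_t count)
{
	void *ptr = xlate_dev_mem_ptr(addr);

	if (!ptr)
		return -EFAULT;
	memcpy(dst, ptr, count);
	unxlate_dev_mem_ptr(addr, ptr);
	return 0;
}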