// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/patching.h>
#include <asm/sections.h>
static DEFINE_RAW_SPINLOCK(patch_lock);

static bool is_exit_text(unsigned long addr)
{
        /* discarded with init text/data */
        return system_state < SYSTEM_RUNNING &&
                addr >= (unsigned long)__exittext_begin &&
                addr < (unsigned long)__exittext_end;
}
static bool is_image_text(unsigned long addr)
{
        return core_kernel_text(addr) || is_exit_text(addr);
}
static void __kprobes *patch_map(void *addr, int fixmap)
{
        unsigned long uintaddr = (uintptr_t) addr;
        bool image = is_image_text(uintaddr);
        struct page *page;

        if (image)
                page = phys_to_page(__pa_symbol(addr));
        else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
                page = vmalloc_to_page(addr);
        else
                return addr;

        BUG_ON(!page);
        return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
                        (uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
        clear_fixmap(fixmap);
}
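/*
 * Illustrative sketch (not part of the upstream file): for a kernel-image
 * address, patch_map() resolves the backing page via __pa_symbol() and
 * installs a temporary writable alias at the given fixmap slot, so a poke
 * never requires the real text mapping to become writable:
 *
 *      void *waddr = patch_map(addr, FIX_TEXT_POKE0);
 *
 *      copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
 *      patch_unmap(FIX_TEXT_POKE0);
 *
 * __aarch64_insn_write() below performs exactly this sequence under
 * patch_lock.
 */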
/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
        int ret;
        __le32 val;

        ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
        if (!ret)
                *insnp = le32_to_cpu(val);

        return ret;
}
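/*
 * Usage sketch (hypothetical caller, not part of this file):
 *
 *      u32 insn;
 *
 *      if (!aarch64_insn_read(addr, &insn))
 *              pr_info("insn at %px is %08x\n", addr, insn);
 *
 * The le32_to_cpu() above has already converted the fetched word to CPU
 * endianness, so callers always see the architectural encoding.
 */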
static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
        void *waddr = addr;
        unsigned long flags = 0;
        int ret;

        raw_spin_lock_irqsave(&patch_lock, flags);
        waddr = patch_map(addr, FIX_TEXT_POKE0);

        ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);

        patch_unmap(FIX_TEXT_POKE0);
        raw_spin_unlock_irqrestore(&patch_lock, flags);

        return ret;
}
int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
        return __aarch64_insn_write(addr, cpu_to_le32(insn));
}
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
        u32 *tp = addr;
        int ret;

        /* A64 instructions must be word aligned */
        if ((uintptr_t)tp & 0x3)
                return -EINVAL;

        ret = aarch64_insn_write(tp, insn);
        if (ret == 0)
                caches_clean_inval_pou((uintptr_t)tp,
                                       (uintptr_t)tp + AARCH64_INSN_SIZE);

        return ret;
}
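/*
 * Usage sketch (hypothetical caller, not part of this file): rewriting one
 * instruction in place, here using the generic NOP encoder
 * aarch64_insn_gen_nop() from <asm/insn.h>:
 *
 *      ret = aarch64_insn_patch_text_nosync(addr, aarch64_insn_gen_nop());
 *
 * "nosync" means only the cache maintenance above is performed; when other
 * CPUs may be executing the old instruction concurrently, use
 * aarch64_insn_patch_text() below instead.
 */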
struct aarch64_insn_patch {
        void            **text_addrs;
        u32             *new_insns;
        int             insn_cnt;
        atomic_t        cpu_count;
};
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
        int i, ret = 0;
        struct aarch64_insn_patch *pp = arg;

        /* The last CPU becomes master */
        if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
                for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
                        ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
                                                             pp->new_insns[i]);
                /* Notify other processors with an additional increment. */
                atomic_inc(&pp->cpu_count);
        } else {
                while (atomic_read(&pp->cpu_count) <= num_online_cpus())
                        cpu_relax();
                isb();
        }

        return ret;
}
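/*
 * Rendezvous walk-through for N online CPUs: every CPU increments cpu_count
 * on entry, so the one that raises it to N knows all others are parked and
 * becomes the patcher. The waiters spin until the patcher's extra increment
 * takes cpu_count to N + 1, then issue an ISB so their pipelines refetch the
 * freshly written instructions.
 */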
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
        struct aarch64_insn_patch patch = {
                .text_addrs = addrs,
                .new_insns = insns,
                .insn_cnt = cnt,
                .cpu_count = ATOMIC_INIT(0),
        };

        if (cnt <= 0)
                return -EINVAL;

        return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
                                       cpu_online_mask);
}
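/*
 * Usage sketch (hypothetical caller, not part of this file): replacing two
 * instructions while no CPU can execute them:
 *
 *      void *addrs[] = { addr0, addr1 };
 *      u32 insns[] = { insn0, insn1 };
 *
 *      ret = aarch64_insn_patch_text(addrs, insns, 2);
 *
 * stop_machine_cpuslocked() funnels every online CPU into the callback
 * above, so the text is never executed mid-update.
 */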