arch/riscv/kernel/patch.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/ftrace.h>
#include <asm/patch.h>
#include <asm/sections.h>

struct patch_insn {
        void *addr;
        u32 insn;
        atomic_t cpu_count;
};

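/* Set while patch_text() patches under stop_machine(); see patch_insn_write(). */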
int riscv_patch_in_stop_machine = false;

#ifdef CONFIG_MMU

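/*
 * Addresses in .exit.text only count as patchable kernel text while the
 * system is still booting; once it is up, that section may have been
 * discarded.
 */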
static inline bool is_kernel_exittext(uintptr_t addr)
{
        return system_state < SYSTEM_RUNNING &&
                addr >= (uintptr_t)__exittext_begin &&
                addr < (uintptr_t)__exittext_end;
}

/*
 * fix_to_virt(idx) requires 'idx' to be a compile-time constant, otherwise
 * its BUILD_BUG_ON() fails with "idx >= __end_of_fixed_addresses".  So use
 * '__always_inline' and a 'const unsigned int fixmap' parameter here to
 * guarantee the index is constant-folded at every call site.
 */
static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
{
        uintptr_t uintaddr = (uintptr_t) addr;
        struct page *page;

        if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr))
                page = phys_to_page(__pa_symbol(addr));
        else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
                page = vmalloc_to_page(addr);
        else
                return addr;

        BUG_ON(!page);

        return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
                                         (uintaddr & ~PAGE_MASK));
}
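
/*
 * Illustrative sketch only (not part of this file's logic): with a plain
 * runtime variable the fixmap index cannot be constant-folded, so the
 * build breaks, e.g.
 *
 *      unsigned int idx = get_poke_slot();     // hypothetical helper
 *      void *va = (void *)fix_to_virt(idx);    // BUILD_BUG_ON() trips
 *
 * whereas patch_map(addr, FIX_TEXT_POKE0) inlines to a constant index.
 */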

static void patch_unmap(int fixmap)
{
        clear_fixmap(fixmap);
}
NOKPROBE_SYMBOL(patch_unmap);

static int patch_insn_write(void *addr, const void *insn, size_t len)
{
        void *waddr = addr;
        bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
        int ret;

        /*
         * The caller is expected to hold text_mutex by the time we get
         * here, so no extra locking is needed to stay safe across cores.
         *
         * We're currently using stop_machine() for ftrace & kprobes, and
         * while that ensures text_mutex is held before installing the
         * mappings it does not ensure text_mutex is held by the calling
         * thread.  That's safe but triggers a lockdep failure, so just
         * elide the assertion for that specific case.
         */
        if (!riscv_patch_in_stop_machine)
                lockdep_assert_held(&text_mutex);

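        /*
         * FIX_TEXT_POKE1 sits one page above FIX_TEXT_POKE0 in the fixmap,
         * so an instruction that straddles a page boundary can be written
         * with a single contiguous copy across the two mappings.
         */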
        if (across_pages)
                patch_map(addr + len, FIX_TEXT_POKE1);

        waddr = patch_map(addr, FIX_TEXT_POKE0);

        ret = copy_to_kernel_nofault(waddr, insn, len);

        patch_unmap(FIX_TEXT_POKE0);

        if (across_pages)
                patch_unmap(FIX_TEXT_POKE1);

        return ret;
}
NOKPROBE_SYMBOL(patch_insn_write);
#else
static int patch_insn_write(void *addr, const void *insn, size_t len)
{
        return copy_to_kernel_nofault(addr, insn, len);
}
NOKPROBE_SYMBOL(patch_insn_write);
#endif /* CONFIG_MMU */

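/*
 * Write new instruction bytes and flush the icache so the change becomes
 * visible to instruction fetch.  "nosync" means no stop_machine() rendezvous
 * is performed: the caller must hold text_mutex and ensure no other hart can
 * be executing the code being rewritten.  A hypothetical ftrace-style caller
 * (illustration only):
 *
 *      u32 nop = 0x00000013;   // "addi x0, x0, 0", the canonical RISC-V nop
 *
 *      mutex_lock(&text_mutex);
 *      ret = patch_text_nosync(ip, &nop, sizeof(nop));
 *      mutex_unlock(&text_mutex);
 */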
int patch_text_nosync(void *addr, const void *insns, size_t len)
{
        u32 *tp = addr;
        int ret;

        ret = patch_insn_write(tp, insns, len);

        if (!ret)
                flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);

        return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);

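/*
 * Runs on every online CPU under stop_machine().  The last CPU to arrive
 * performs the write, then bumps cpu_count once more to release the other
 * CPUs, which spin until the count passes num_online_cpus().
 */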
static int patch_text_cb(void *data)
{
        struct patch_insn *patch = data;
        int ret = 0;

        if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
                ret = patch_text_nosync(patch->addr, &patch->insn,
                                        GET_INSN_LENGTH(patch->insn));
                atomic_inc(&patch->cpu_count);
        } else {
                while (atomic_read(&patch->cpu_count) <= num_online_cpus())
                        cpu_relax();
                smp_mb();
        }

        return ret;
}
NOKPROBE_SYMBOL(patch_text_cb);

int patch_text(void *addr, u32 insn)
{
        int ret;
        struct patch_insn patch = {
                .addr = addr,
                .insn = insn,
                .cpu_count = ATOMIC_INIT(0),
        };

        /*
         * kprobes takes text_mutex before calling patch_text(), but since we
         * call stop_machine() here, the lockdep assertion in
         * patch_insn_write() gets confused by the context in which the lock
         * is taken.  Instead, ensure the lock is held before calling
         * stop_machine(), and set riscv_patch_in_stop_machine to skip the
         * check in patch_insn_write().
         */
        lockdep_assert_held(&text_mutex);
        riscv_patch_in_stop_machine = true;
        ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
        riscv_patch_in_stop_machine = false;
        return ret;
}
NOKPROBE_SYMBOL(patch_text);
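
/*
 * Usage sketch (illustration only, not part of this file): a kprobes-style
 * caller arms a probe by swapping in an ebreak while all other harts
 * rendezvous in patch_text_cb():
 *
 *      mutex_lock(&text_mutex);
 *      ret = patch_text(p->addr, __BUG_INSN_32);       // 0x00100073, "ebreak"
 *      mutex_unlock(&text_mutex);
 */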