// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
#include <linux/sync_core.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/paravirt.h>
#include <asm/asm-prototypes.h>
int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)
static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);
static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
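
/*
 * Both knobs above are early boot parameters, e.g. booting with:
 *
 *   debug-alternative    - log every patch site via DPRINTK/DUMP_BYTES below
 *   noreplace-smp        - keep LOCK prefixes even when running uniprocessor
 */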
#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
} while (0)
#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)
static const unsigned char x86nops[] =
{
	BYTES_NOP1,
	BYTES_NOP2,
	BYTES_NOP3,
	BYTES_NOP4,
	BYTES_NOP5,
	BYTES_NOP6,
	BYTES_NOP7,
	BYTES_NOP8,
};

const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
{
	NULL,
	x86nops,
	x86nops + 1,
	x86nops + 1 + 2,
	x86nops + 1 + 2 + 3,
	x86nops + 1 + 2 + 3 + 4,
	x86nops + 1 + 2 + 3 + 4 + 5,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
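
/*
 * Example of how the two tables above compose (illustrative; the exact byte
 * patterns come from the BYTES_NOPn definitions in <asm/nops.h>, shown here
 * for the 64-bit P6 encodings):
 *
 *   x86_nops[1] -> 90               (NOP1)
 *   x86_nops[3] -> 0f 1f 00         (NOP3)
 *   x86_nops[5] -> 0f 1f 44 00 00   (NOP5)
 *
 * Each entry points into x86nops[] at the offset where the n-byte pattern
 * begins, hence the cumulative "+ 1 + 2 + ..." arithmetic.
 */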
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, x86_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
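
/*
 * E.g. add_nops(buf, 11) with ASM_NOP_MAX == 8 emits one 8-byte NOP followed
 * by one 3-byte NOP (byte patterns assume the 64-bit encodings noted above):
 *
 *   0f 1f 84 00 00 00 00 00  0f 1f 00
 */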
extern s32 __retpoline_sites[], __retpoline_sites_end[];
extern s32 __return_sites[], __return_sites_end[];
extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);
/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}
static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insn_buff + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;
	insn_buff[0] = 0xeb;
	insn_buff[1] = (s8)n_dspl;
	add_nops(insn_buff + 2, 3);
	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;
	insn_buff[0] = 0xe9;
	*(s32 *)&insn_buff[1] = n_dspl;
	repl_len = 5;

done:
	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
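
/*
 * Illustrative example: if the replacement is "e9 xx xx xx xx" (JMP.d32) and
 * its target ends up only 0x10 bytes past the original site, the displacement
 * measured from the end of a two-byte JMP is 0x10 - 2 = 0x0e, so the patched
 * bytes become:
 *
 *   eb 0e 0f 1f 00        JMP.d8 +0x0e; 3-byte NOP pad
 *
 * instead of the original five-byte form.
 */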
/*
 * optimize_nops_range() - Optimize a sequence of single byte NOPs (0x90)
 *
 * @instr: instruction byte stream
 * @instrlen: length of the above
 * @off: offset within @instr where the first NOP has been detected
 *
 * Return: number of NOPs found (and replaced).
 */
static __always_inline int optimize_nops_range(u8 *instr, u8 instrlen, int off)
{
	unsigned long flags;
	int i = off, nnops;

	while (i < instrlen) {
		if (instr[i] != 0x90)
			break;

		i++;
	}

	nnops = i - off;

	if (nnops <= 1)
		return nnops;

	local_irq_save(flags);
	add_nops(instr + off, nnops);
	local_irq_restore(flags);

	DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i);

	return nnops;
}
225 * "noinline" to cause control flow change and thus invalidate I$ and
226 * cause refetch after modification.
228 static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
234 * Jump over the non-NOP insns and optimize single-byte NOPs into bigger
238 if (insn_decode_kernel(&insn, &instr[i]))
242 * See if this and any potentially following NOPs can be
245 if (insn.length == 1 && insn.opcode.bytes[0] == 0x90)
246 i += optimize_nops_range(instr, len, i);
/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have less capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insn_buff[MAX_PATCH_LEN];

	DPRINTK("alt table %px, -> %px", start, end);

	/*
	 * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using
	 * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here.
	 * During the process, KASAN becomes confused seeing partial LA57
	 * conversion and triggers a false-positive out-of-bound report.
	 *
	 * Disable KASAN until the patching is complete.
	 */
	kasan_disable_current();

	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insn_buff_sz = 0;
		/* Mask away "NOT" flag bit for feature to test. */
		u16 feature = a->cpuid & ~ALTINSTR_FLAG_INV;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insn_buff));
		BUG_ON(feature >= (NCAPINTS + NBUGINTS) * 32);

		/*
		 * Patch if either:
		 * - feature is present
		 * - feature not present but ALTINSTR_FLAG_INV is set to mean,
		 *   patch if feature is *NOT* present.
		 */
		if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV))
			goto next;

		DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)",
			(a->cpuid & ALTINSTR_FLAG_INV) ? "!" : "",
			feature >> 5,
			feature & 0x1f,
			instr, instr, a->instrlen,
			replacement, a->replacementlen);

		DUMP_BYTES(instr, a->instrlen, "%px:   old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%px:   rpl_insn: ", replacement);

		memcpy(insn_buff, replacement, a->replacementlen);
		insn_buff_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative jump; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
		if (a->replacementlen == 5 && *insn_buff == 0xe8) {
			*(s32 *)(insn_buff + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insn_buff + 1),
				(unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insn_buff);

		for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
			insn_buff[insn_buff_sz] = 0x90;

		DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);

		text_poke_early(instr, insn_buff, insn_buff_sz);

next:
		optimize_nops(instr, a->instrlen);
	}

	kasan_enable_current();
}
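
/*
 * A typical producer of the alt_instr records walked above is the
 * alternative() macro from <asm/alternative.h>; a hypothetical use could
 * look like:
 *
 *	alternative("call old_impl", "call new_impl", X86_FEATURE_FOO);
 *
 * which records the old/new instruction streams plus the feature bit in
 * .altinstructions. (old_impl, new_impl and X86_FEATURE_FOO are made-up
 * names for illustration only.)
 */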
static inline bool is_jcc32(struct insn *insn)
{
	/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
	return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
}

#if defined(CONFIG_RETPOLINE) && defined(CONFIG_OBJTOOL)
/*
 * CALL/JMP *%\reg
 */
static int emit_indirect(int op, int reg, u8 *bytes)
{
	u8 modrm;
	int i = 0;

	switch (op) {
	case CALL_INSN_OPCODE:
		modrm = 0x10; /* Reg = 2; CALL r/m */
		break;

	case JMP32_INSN_OPCODE:
		modrm = 0x20; /* Reg = 4; JMP r/m */
		break;

	default:
		WARN_ON_ONCE(1);
		return -1;
	}

	if (reg >= 8) {
		bytes[i++] = 0x41; /* REX.B prefix */
		reg -= 8;
	}

	modrm |= 0xc0; /* Mod = 3 */
	modrm += reg;

	bytes[i++] = 0xff; /* opcode */
	bytes[i++] = modrm;

	return i;
}
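
/*
 * Resulting encodings (illustrative):
 *
 *   emit_indirect(CALL_INSN_OPCODE,  0, bytes)  ->  ff d0      CALL *%rax
 *   emit_indirect(JMP32_INSN_OPCODE, 11, bytes) ->  41 ff e3   JMP  *%r11
 *
 * 0x41 is the REX.B prefix selecting the extended register bank (r8-r15).
 */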
/*
 * Rewrite the compiler generated retpoline thunk calls.
 *
 * For spectre_v2=off (!X86_FEATURE_RETPOLINE), rewrite them into immediate
 * indirect instructions, avoiding the extra indirection.
 *
 * For example, convert:
 *
 *   CALL __x86_indirect_thunk_\reg
 *
 * into:
 *
 *   CALL *%\reg
 *
 * It also tries to inline spectre_v2=retpoline,lfence when size permits.
 */
static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
{
	retpoline_thunk_t *target;
	int reg, ret, i = 0;
	u8 op, cc;

	target = addr + insn->length + insn->immediate.value;
	reg = target - __x86_indirect_thunk_array;

	if (WARN_ON_ONCE(reg & ~0xf))
		return -1;

	/* If anyone ever does: CALL/JMP *%rsp, we're in deep trouble. */
	BUG_ON(reg == 4);

	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
	    !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE))
		return -1;

	op = insn->opcode.bytes[0];

	/*
	 * Convert:
	 *
	 *   Jcc.d32 __x86_indirect_thunk_\reg
	 *
	 * into:
	 *
	 *   Jncc.d8 1f
	 *   [ LFENCE ]
	 *   JMP *%\reg
	 *   [ NOP ]
	 * 1:
	 */
	if (is_jcc32(insn)) {
		cc = insn->opcode.bytes[1] & 0xf;
		cc ^= 1; /* invert condition */

		bytes[i++] = 0x70 + cc;        /* Jcc.d8 */
		bytes[i++] = insn->length - 2; /* sizeof(Jcc.d8) == 2 */

		/* Continue as if: JMP.d32 __x86_indirect_thunk_\reg */
		op = JMP32_INSN_OPCODE;
	}

	/*
	 * For RETPOLINE_LFENCE: prepend the indirect CALL/JMP with an LFENCE.
	 */
	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		bytes[i++] = 0x0f;
		bytes[i++] = 0xae;
		bytes[i++] = 0xe8; /* LFENCE */
	}

	ret = emit_indirect(op, reg, bytes + i);
	if (ret < 0)
		return ret;
	i += ret;

	/*
	 * The compiler is supposed to emit an INT3 after every unconditional
	 * JMP instruction due to AMD BTC. However, if the compiler is too old
	 * or SLS isn't enabled, we still need an INT3 after indirect JMPs
	 * even on Intel.
	 */
	if (op == JMP32_INSN_OPCODE && i < insn->length)
		bytes[i++] = INT3_INSN_OPCODE;

	for (; i < insn->length;)
		bytes[i++] = BYTES_NOP1;

	return i;
}
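
/*
 * Illustrative results for a 5-byte "call __x86_indirect_thunk_\reg" site
 * (e8 <rel32>):
 *
 *   spectre_v2=off:               41 ff d3 90 90    CALL *%r11; NOP; NOP
 *   spectre_v2=retpoline,lfence:  0f ae e8 ff d0    LFENCE; CALL *%rax
 *
 * The LFENCE variant is only installed when the rewrite fits in the original
 * instruction's length, as checked by the caller below.
 */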
/*
 * Generated by 'objtool --retpoline'.
 */
void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		struct insn insn;
		int len, ret;
		u8 bytes[16];
		u8 op1, op2;

		ret = insn_decode_kernel(&insn, addr);
		if (WARN_ON_ONCE(ret < 0))
			continue;

		op1 = insn.opcode.bytes[0];
		op2 = insn.opcode.bytes[1];

		switch (op1) {
		case CALL_INSN_OPCODE:
		case JMP32_INSN_OPCODE:
			break;

		case 0x0f: /* escape */
			if (op2 >= 0x80 && op2 <= 0x8f)
				break;
			fallthrough;
		default:
			WARN_ON_ONCE(1);
			continue;
		}

		DPRINTK("retpoline at: %pS (%px) len: %d to: %pS",
			addr, addr, insn.length,
			addr + insn.length + insn.immediate.value);

		len = patch_retpoline(addr, &insn, bytes);
		if (len == insn.length) {
			optimize_nops(bytes, len);
			DUMP_BYTES(((u8*)addr),  len, "%px: orig: ", addr);
			DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr);
			text_poke_early(addr, bytes, len);
		}
	}
}
#ifdef CONFIG_RETHUNK

/*
 * Rewrite the compiler generated return thunk tail-calls.
 *
 * For example, convert:
 *
 *   JMP __x86_return_thunk
 *
 * into:
 *
 *   RET
 */
static int patch_return(void *addr, struct insn *insn, u8 *bytes)
{
	int i = 0;

	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
		if (x86_return_thunk == __x86_return_thunk)
			return -1;

		i = JMP32_INSN_SIZE;
		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
	} else {
		bytes[i++] = RET_INSN_OPCODE;
	}

	for (; i < insn->length;)
		bytes[i++] = INT3_INSN_OPCODE;

	return i;
}
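
/*
 * Illustrative result for a 5-byte "jmp __x86_return_thunk" tail-call
 * (e9 <rel32>) when X86_FEATURE_RETHUNK is off:
 *
 *   c3 cc cc cc cc        RET; INT3 x4
 *
 * The INT3 padding ensures straight-line speculation past the RET runs into
 * traps rather than stale bytes.
 */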
void __init_or_module noinline apply_returns(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *dest = NULL, *addr = (void *)s + *s;
		struct insn insn;
		int len, ret;
		u8 bytes[16];
		u8 op;

		ret = insn_decode_kernel(&insn, addr);
		if (WARN_ON_ONCE(ret < 0))
			continue;

		op = insn.opcode.bytes[0];
		if (op == JMP32_INSN_OPCODE)
			dest = addr + insn.length + insn.immediate.value;

		if (__static_call_fixup(addr, op, dest) ||
		    WARN_ONCE(dest != &__x86_return_thunk,
			      "missing return thunk: %pS-%pS: %*ph",
			      addr, dest, 5, addr))
			continue;

		DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",
			addr, addr, insn.length,
			addr + insn.length + insn.immediate.value);

		len = patch_return(addr, &insn, bytes);
		if (len == insn.length) {
			DUMP_BYTES(((u8*)addr),  len, "%px: orig: ", addr);
			DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr);
			text_poke_early(addr, bytes, len);
		}
	}
}
#else
void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
#endif /* CONFIG_RETHUNK */
#else /* !CONFIG_RETPOLINE || !CONFIG_OBJTOOL */

void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }

#endif /* CONFIG_RETPOLINE && CONFIG_OBJTOOL */
#ifdef CONFIG_X86_KERNEL_IBT

/*
 * Generated by: objtool --ibt
 */
void __init_or_module noinline apply_ibt_endbr(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		u32 endbr, poison = gen_endbr_poison();
		void *addr = (void *)s + *s;

		if (WARN_ON_ONCE(get_kernel_nofault(endbr, addr)))
			continue;

		if (WARN_ON_ONCE(!is_endbr(endbr)))
			continue;

		DPRINTK("ENDBR at: %pS (%px)", addr, addr);

		/*
		 * When we have IBT, the lack of ENDBR will trigger #CP.
		 */
		DUMP_BYTES(((u8*)addr), 4, "%px: orig: ", addr);
		DUMP_BYTES(((u8*)&poison), 4, "%px: repl: ", addr);
		text_poke_early(addr, &poison, 4);
	}
}

#else

void __init_or_module noinline apply_ibt_endbr(s32 *start, s32 *end) { }

#endif /* CONFIG_X86_KERNEL_IBT */
#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}
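
/*
 * The byte being flipped is the prefix emitted by LOCK_PREFIX, e.g. for an
 * atomic increment (instruction chosen for illustration only):
 *
 *   SMP:  f0 ff 05 ...    lock incl <mem>
 *   UP:   3e ff 05 ...    ds   incl <mem>
 *
 * The DS override is architecturally a no-op here, so the UP form runs
 * without bus locking.
 */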
struct smp_alt_module {
	/* owning module (NULL for the core kernel) and its name */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;	/* protected by text_mutex */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text, void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&text_mutex);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&text_mutex);
}
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}
void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&text_mutex);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&text_mutex);
}
/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insn_buff[MAX_PATCH_LEN];

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insn_buff, p->instr, p->len);
		used = paravirt_patch(p->type, insn_buff, (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insn_buff + used, p->len - used);
		text_poke_early(p->instr, insn_buff, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
/*
 * Self-test for the INT3 based CALL emulation code.
 *
 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
 * properly and that there is a stack gap between the INT3 frame and the
 * previous context. Without this gap doing a virtual PUSH on the interrupted
 * stack would corrupt the INT3 IRET frame.
 *
 * See entry_{32,64}.S for more details.
 */

/*
 * We define the int3_magic() function in assembly to control the calling
 * convention such that we can 'call' it from assembly.
 */
extern void int3_magic(unsigned int *ptr); /* defined in asm */

asm (
"	.pushsection	.init.text, \"ax\", @progbits\n"
"	.type		int3_magic, @function\n"
"int3_magic:\n"
	ANNOTATE_NOENDBR
"	movl	$1, (%" _ASM_ARG1 ")\n"
	ASM_RET
"	.size		int3_magic, .-int3_magic\n"
"	.popsection\n"
);
extern void int3_selftest_ip(void); /* defined in asm below */

static int __init
int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long selftest = (unsigned long)&int3_selftest_ip;
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	OPTIMIZER_HIDE_VAR(selftest);

	if (!regs || user_mode(regs))
		return NOTIFY_DONE;

	if (val != DIE_INT3)
		return NOTIFY_DONE;

	if (regs->ip - INT3_INSN_SIZE != selftest)
		return NOTIFY_DONE;

	int3_emulate_call(regs, (unsigned long)&int3_magic);
	return NOTIFY_STOP;
}
/* Must be noinline to ensure uniqueness of int3_selftest_ip. */
static noinline void __init int3_selftest(void)
{
	static __initdata struct notifier_block int3_exception_nb = {
		.notifier_call	= int3_exception_notify,
		.priority	= INT_MAX-1, /* last */
	};
	unsigned int val = 0;

	BUG_ON(register_die_notifier(&int3_exception_nb));

	/*
	 * Basically: int3_magic(&val); but really complicated :-)
	 *
	 * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb
	 * notifier above will emulate CALL for us.
	 */
	asm volatile ("int3_selftest_ip:\n\t"
		      ANNOTATE_NOENDBR
		      "    int3; nop; nop; nop; nop\n\t"
		      : ASM_CALL_CONSTRAINT
		      : __ASM_SEL_RAW(a, D) (&val)
		      : "memory");

	BUG_ON(val != 1);

	unregister_die_notifier(&int3_exception_nb);
}
void __init alternative_instructions(void)
{
	int3_selftest();

	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the to be patched code.
	 * Other CPUs are not running.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	/*
	 * Paravirt patching and alternative patching can be combined to
	 * replace a function call with a short direct code sequence (e.g.
	 * by setting a constant return value instead of doing that in an
	 * external function).
	 * In order to make this work the following sequence is required:
	 * 1. set (artificial) features depending on used paravirt
	 *    functions which can later influence alternative patching
	 * 2. apply paravirt patching (generally replacing an indirect
	 *    function call with a direct one)
	 * 3. apply alternative patching (e.g. replacing a direct function
	 *    call with a custom code sequence)
	 * Doing paravirt patching after alternative patching would clobber
	 * the optimization of the custom code with a function call again.
	 */
	paravirt_set_cap();

	/*
	 * First patch paravirt functions, such that we overwrite the indirect
	 * call with the direct call.
	 */
	apply_paravirt(__parainstructions, __parainstructions_end);

	/*
	 * Rewrite the retpolines, must be done before alternatives since
	 * those can rewrite the retpoline thunks.
	 */
	apply_retpolines(__retpoline_sites, __retpoline_sites_end);
	apply_returns(__return_sites, __return_sites_end);

	/*
	 * Then patch alternatives, such that those paravirt calls that are in
	 * alternatives can be overwritten by their immediate fragments.
	 */
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	apply_ibt_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1) {
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	}
#endif

	restart_nmi();
	alternatives_patched = 1;
}
/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void __init_or_module text_poke_early(void *addr, const void *opcode,
				      size_t len)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_NX) &&
	    is_module_text_address((unsigned long)addr)) {
		/*
		 * Modules text is marked initially as non-executable, so the
		 * code cannot be running and speculative code-fetches are
		 * prevented. Just change the code.
		 */
		memcpy(addr, opcode, len);
	} else {
		local_irq_save(flags);
		memcpy(addr, opcode, len);
		sync_core();
		local_irq_restore(flags);

		/*
		 * Could also do a CLFLUSH here to speed up CPU recovery; but
		 * that causes hangs on some VIA CPUs.
		 */
	}
}
typedef struct {
	struct mm_struct *mm;
} temp_mm_state_t;

/*
 * Using a temporary mm allows setting temporary mappings that are not
 * accessible by other CPUs. Such mappings are needed to perform sensitive
 * memory writes that override the kernel memory protections (e.g., W^X),
 * without exposing the temporary page-table mappings that are required for
 * these write operations to other CPUs. Using a temporary mm also avoids TLB
 * shootdowns when the mapping is torn down.
 *
 * Context: The temporary mm needs to be used exclusively by a single core. To
 *          harden security IRQs must be disabled while the temporary mm is
 *          loaded, thereby preventing interrupt handler bugs from overriding
 *          the kernel memory protection.
 */
static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
{
	temp_mm_state_t temp_state;

	lockdep_assert_irqs_disabled();

	/*
	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
	 * with a stale address space WITHOUT being in lazy mode after
	 * restoring the previous mm.
	 */
	if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
		leave_mm(smp_processor_id());

	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	switch_mm_irqs_off(NULL, mm, current);

	/*
	 * If breakpoints are enabled, disable them while the temporary mm is
	 * used. Userspace might set up watchpoints on addresses that are used
	 * in the temporary mm, which would lead to wrong signals being sent or
	 * crashes.
	 *
	 * Note that breakpoints are not disabled selectively, which also causes
	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
	 * undesirable, but still seems reasonable as the code that runs in the
	 * temporary mm should be short.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_disable();

	return temp_state;
}
static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(NULL, prev_state.mm, current);

	/*
	 * Restore the breakpoints if they were disabled before the temporary mm
	 * was loaded.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();
}
__ro_after_init struct mm_struct *poking_mm;
__ro_after_init unsigned long poking_addr;

static void text_poke_memcpy(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
}

static void text_poke_memset(void *dst, const void *src, size_t len)
{
	int c = *(const int *)src;

	memset(dst, c, len);
}

typedef void text_poke_f(void *dst, const void *src, size_t len);
static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t len)
{
	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
	struct page *pages[2] = {NULL};
	temp_mm_state_t prev;
	unsigned long flags;
	pte_t pte, *ptep;
	spinlock_t *ptl;
	pgprot_t pgprot;

	/*
	 * While boot memory allocator is running we cannot use struct pages as
	 * they are not yet initialized. There is no way to recover.
	 */
	BUG_ON(!after_bootmem);

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		if (cross_page_boundary)
			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		if (cross_page_boundary)
			pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	/*
	 * If something went wrong, crash and burn since recovery paths are not
	 * implemented.
	 */
	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));

	/*
	 * Map the page without the global bit, as TLB flushing is done with
	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
	 */
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);

	/*
	 * The lock is not really needed, but this allows us to avoid
	 * open-coding.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);

	/*
	 * This must not fail; preallocated in poking_init().
	 */
	VM_BUG_ON(!ptep);

	local_irq_save(flags);

	pte = mk_pte(pages[0], pgprot);
	set_pte_at(poking_mm, poking_addr, ptep, pte);

	if (cross_page_boundary) {
		pte = mk_pte(pages[1], pgprot);
		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
	}

	/*
	 * Loading the temporary mm behaves as a compiler barrier, which
	 * guarantees that the PTE will be set at the time memcpy() is done.
	 */
	prev = use_temporary_mm(poking_mm);

	kasan_disable_current();
	func((u8 *)poking_addr + offset_in_page(addr), src, len);
	kasan_enable_current();

	/*
	 * Ensure that the PTE is only cleared after the instructions of memcpy
	 * were issued by using a compiler barrier.
	 */
	barrier();

	pte_clear(poking_mm, poking_addr, ptep);
	if (cross_page_boundary)
		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);

	/*
	 * Loading the previous page-table hierarchy requires a serializing
	 * instruction that already allows the core to see the updated version.
	 * Xen-PV is assumed to serialize execution in a similar manner.
	 */
	unuse_temporary_mm(prev);

	/*
	 * Flushing the TLB might involve IPIs, which would require enabled
	 * IRQs, but none are needed here since the mm is not used at this
	 * point.
	 */
	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
			   PAGE_SHIFT, false);

	if (func == text_poke_memcpy) {
		/*
		 * If the text does not match what we just wrote then something is
		 * fundamentally screwy; there's nothing we can really do about that.
		 */
		BUG_ON(memcmp(addr, src, len));
	}

	local_irq_restore(flags);
	pte_unmap_unlock(ptep, ptl);
	return addr;
}
/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note that the caller must ensure that if the modified code is part of a
 * module, the module would not be removed during poking. This can be achieved
 * by registering a module notifier, and ordering module removal and patching
 * through a mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	lockdep_assert_held(&text_mutex);

	return __text_poke(text_poke_memcpy, addr, opcode, len);
}
/**
 * text_poke_kgdb - Update instructions on a live kernel by kgdb
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Context: should only be used by kgdb, which ensures no other core is running,
 *          despite the fact it does not hold the text_mutex.
 */
void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
{
	return __text_poke(text_poke_memcpy, addr, opcode, len);
}
/**
 * text_poke_copy - Copy instructions into (an unused part of) RX memory
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy, could be more than 2x PAGE_SIZE
 *
 * Not safe against concurrent execution; useful for JITs to dump
 * new code blocks into unused regions of RX memory. Can be used in
 * conjunction with synchronize_rcu_tasks() to wait for existing
 * execution to quiesce after having made sure no existing functions
 * pointers are live.
 */
void *text_poke_copy(void *addr, const void *opcode, size_t len)
{
	unsigned long start = (unsigned long)addr;
	size_t patched = 0;

	if (WARN_ON_ONCE(core_kernel_text(start)))
		return NULL;

	mutex_lock(&text_mutex);
	while (patched < len) {
		unsigned long ptr = start + patched;
		size_t s;

		s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);

		__text_poke(text_poke_memcpy, (void *)ptr, opcode + patched, s);
		patched += s;
	}
	mutex_unlock(&text_mutex);
	return addr;
}
/**
 * text_poke_set - memset into (an unused part of) RX memory
 * @addr: address to modify
 * @c: the byte to fill the area with
 * @len: length to copy, could be more than 2x PAGE_SIZE
 *
 * This is useful to overwrite unused regions of RX memory with illegal
 * instructions.
 */
void *text_poke_set(void *addr, int c, size_t len)
{
	unsigned long start = (unsigned long)addr;
	size_t patched = 0;

	if (WARN_ON_ONCE(core_kernel_text(start)))
		return NULL;

	mutex_lock(&text_mutex);
	while (patched < len) {
		unsigned long ptr = start + patched;
		size_t s;

		s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);

		__text_poke(text_poke_memset, (void *)ptr, (void *)&c, s);
		patched += s;
	}
	mutex_unlock(&text_mutex);
	return addr;
}
static void do_sync_core(void *info)
{
	sync_core();
}

void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}
/*
 * NOTE: crazy scheme to allow patching Jcc.d32 but not increase the size of
 * this thing. When len == 6 everything is prefixed with 0x0f and we map
 * opcode to Jcc.d8, using len to distinguish.
 */
struct text_poke_loc {
	/* addr := _stext + rel_addr */
	s32 rel_addr;
	s32 disp;
	u8 len;
	u8 opcode;
	const u8 text[POKE_MAX_OPCODE_SIZE];
	/* see text_poke_bp_batch() */
	u8 old;
};

struct bp_patching_desc {
	struct text_poke_loc *vec;
	int nr_entries;
	atomic_t refs;
};

static struct bp_patching_desc bp_desc;
static __always_inline
struct bp_patching_desc *try_get_desc(void)
{
	struct bp_patching_desc *desc = &bp_desc;

	if (!arch_atomic_inc_not_zero(&desc->refs))
		return NULL;

	return desc;
}

static __always_inline void put_desc(void)
{
	struct bp_patching_desc *desc = &bp_desc;

	smp_mb__before_atomic();
	arch_atomic_dec(&desc->refs);
}
static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
{
	return _stext + tp->rel_addr;
}

static __always_inline int patch_cmp(const void *key, const void *elt)
{
	struct text_poke_loc *tp = (struct text_poke_loc *) elt;

	if (key < text_poke_addr(tp))
		return -1;
	if (key > text_poke_addr(tp))
		return 1;
	return 0;
}
noinstr int poke_int3_handler(struct pt_regs *regs)
{
	struct bp_patching_desc *desc;
	struct text_poke_loc *tp;
	int ret = 0;
	void *ip;

	if (user_mode(regs))
		return 0;

	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_desc with non-zero refcount:
	 *
	 *	bp_desc.refs = 1		INT3
	 *	WMB				RMB
	 *	write INT3			if (bp_desc.refs != 0)
	 */
	smp_rmb();

	desc = try_get_desc();
	if (!desc)
		return 0;

	/*
	 * Discount the INT3. See text_poke_bp_batch().
	 */
	ip = (void *) regs->ip - INT3_INSN_SIZE;

	/*
	 * Skip the binary search if there is a single member in the vector.
	 */
	if (unlikely(desc->nr_entries > 1)) {
		tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
				      sizeof(struct text_poke_loc),
				      patch_cmp);
		if (!tp)
			goto out_put;
	} else {
		tp = desc->vec;
		if (text_poke_addr(tp) != ip)
			goto out_put;
	}

	ip += tp->len;

	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
		/*
		 * Someone poked an explicit INT3, they'll want to handle it,
		 * do not consume.
		 */
		goto out_put;

	case RET_INSN_OPCODE:
		int3_emulate_ret(regs);
		break;

	case CALL_INSN_OPCODE:
		int3_emulate_call(regs, (long)ip + tp->disp);
		break;

	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		int3_emulate_jmp(regs, (long)ip + tp->disp);
		break;

	case 0x70 ... 0x7f: /* Jcc */
		int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
		break;

	default:
		BUG();
	}

	ret = 1;

out_put:
	put_desc();
	return ret;
}
#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
static struct text_poke_loc tp_vec[TP_VEC_MAX];
static int tp_vec_nr;
/**
 * text_poke_bp_batch() -- update instructions on live kernel on SMP
 * @tp:			vector of instructions to patch
 * @nr_entries:		number of entries in the vector
 *
 * Modify multi-byte instruction by using int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoint.
 *
 * The way it is done:
 *	- For each entry in the vector:
 *		- add a int3 trap to the address that will be patched
 *	- sync cores
 *	- For each entry in the vector:
 *		- update all but the first byte of the patched range
 *	- sync cores
 *	- For each entry in the vector:
 *		- replace the first byte (int3) by the first byte of
 *		  replacing opcode
 *	- sync cores
 */
static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
{
	unsigned char int3 = INT3_INSN_OPCODE;
	unsigned int i;
	int do_sync;

	lockdep_assert_held(&text_mutex);

	bp_desc.vec = tp;
	bp_desc.nr_entries = nr_entries;

	/*
	 * Corresponds to the implicit memory barrier in try_get_desc() to
	 * ensure reading a non-zero refcount provides up to date bp_desc data.
	 */
	atomic_set_release(&bp_desc.refs, 1);

	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * nr_entries and handler are correctly ordered wrt. patching.
	 */
	smp_wmb();

	/*
	 * First step: add a int3 trap to the address that will be patched.
	 */
	for (i = 0; i < nr_entries; i++) {
		tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
	}

	text_poke_sync();

	/*
	 * Second step: update all but the first byte of the patched range.
	 */
	for (do_sync = 0, i = 0; i < nr_entries; i++) {
		u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
		u8 _new[POKE_MAX_OPCODE_SIZE+1];
		const u8 *new = tp[i].text;
		int len = tp[i].len;

		if (len - INT3_INSN_SIZE > 0) {
			memcpy(old + INT3_INSN_SIZE,
			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
			       len - INT3_INSN_SIZE);

			if (len == 6) {
				_new[0] = 0x0f;
				memcpy(_new + 1, new, 5);
				new = _new;
			}

			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
				  new + INT3_INSN_SIZE,
				  len - INT3_INSN_SIZE);

			do_sync++;
		}

		/*
		 * Emit a perf event to record the text poke, primarily to
		 * support Intel PT decoding which must walk the executable code
		 * to reconstruct the trace. The flow up to here is:
		 *   - write INT3 byte
		 *   - IPI-SYNC
		 *   - write instruction tail
		 * At this point the actual control flow will be through the
		 * INT3 and handler and not hit the old or new instruction.
		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
		 * can still be decoded. Subsequently:
		 *   - emit RECORD_TEXT_POKE with the new instruction
		 *   - IPI-SYNC
		 *   - write first byte
		 *   - IPI-SYNC
		 * So before the text poke event timestamp, the decoder will see
		 * either the old instruction flow or FUP/TIP of INT3. After the
		 * text poke event timestamp, the decoder will see either the
		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
		 * use the timestamp as the point at which to modify the
		 * instruction.
		 * The old instruction is recorded so that the event can be
		 * processed forwards or backwards.
		 */
		perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len);
	}

	if (do_sync) {
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		text_poke_sync();
	}

	/*
	 * Third step: replace the first byte (int3) by the first byte of
	 * replacing opcode.
	 */
	for (do_sync = 0, i = 0; i < nr_entries; i++) {
		u8 byte = tp[i].text[0];

		if (tp[i].len == 6)
			byte = 0x0f;

		if (byte == INT3_INSN_OPCODE)
			continue;

		text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE);
		do_sync++;
	}

	if (do_sync)
		text_poke_sync();

	/*
	 * Remove and wait for refs to be zero.
	 */
	if (!atomic_dec_and_test(&bp_desc.refs))
		atomic_cond_read_acquire(&bp_desc.refs, !VAL);
}
static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
			       const void *opcode, size_t len, const void *emulate)
{
	struct insn insn;
	int ret, i = 0;

	if (len == 6)
		i = 1;
	memcpy((void *)tp->text, opcode+i, len-i);
	if (!emulate)
		emulate = opcode;

	ret = insn_decode_kernel(&insn, emulate);
	BUG_ON(ret < 0);

	tp->rel_addr = addr - (void *)_stext;
	tp->len = len;
	tp->opcode = insn.opcode.bytes[0];

	if (is_jcc32(&insn)) {
		/*
		 * Map Jcc.d32 onto Jcc.d8 and use len to distinguish.
		 */
		tp->opcode = insn.opcode.bytes[1] - 0x10;
	}

	switch (tp->opcode) {
	case RET_INSN_OPCODE:
	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		/*
		 * Control flow instructions without implied execution of the
		 * next instruction can be padded with INT3.
		 */
		for (i = insn.length; i < len; i++)
			BUG_ON(tp->text[i] != INT3_INSN_OPCODE);
		break;

	default:
		BUG_ON(len != insn.length);
	}

	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
	case RET_INSN_OPCODE:
		break;

	case CALL_INSN_OPCODE:
	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
	case 0x70 ... 0x7f: /* Jcc */
		tp->disp = insn.immediate.value;
		break;

	default: /* assume NOP */
		switch (len) {
		case 2: /* NOP2 -- emulate as JMP8+0 */
			BUG_ON(memcmp(emulate, x86_nops[len], len));
			tp->opcode = JMP8_INSN_OPCODE;
			tp->disp = 0;
			break;

		case 5: /* NOP5 -- emulate as JMP32+0 */
			BUG_ON(memcmp(emulate, x86_nops[len], len));
			tp->opcode = JMP32_INSN_OPCODE;
			tp->disp = 0;
			break;

		default: /* unknown instruction */
			BUG();
		}
		break;
	}
}
/*
 * We hard rely on the tp_vec being ordered; ensure this is so by flushing
 * early if needed.
 */
static bool tp_order_fail(void *addr)
{
	struct text_poke_loc *tp;

	if (!tp_vec_nr)
		return false;

	if (!addr) /* force */
		return true;

	tp = &tp_vec[tp_vec_nr - 1];
	if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
		return true;

	return false;
}

static void text_poke_flush(void *addr)
{
	if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
		text_poke_bp_batch(tp_vec, tp_vec_nr);
		tp_vec_nr = 0;
	}
}
void text_poke_finish(void)
{
	text_poke_flush(NULL);
}

void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc *tp;

	if (unlikely(system_state == SYSTEM_BOOTING)) {
		text_poke_early(addr, opcode, len);
		return;
	}

	text_poke_flush(addr);

	tp = &tp_vec[tp_vec_nr++];
	text_poke_loc_init(tp, addr, opcode, len, emulate);
}
/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @emulate:	instruction to be emulated
 *
 * Update a single instruction with the vector in the stack, avoiding
 * dynamically allocated memory. This function should be used when it is
 * not possible to allocate memory.
 */
void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc tp;

	if (unlikely(system_state == SYSTEM_BOOTING)) {
		text_poke_early(addr, opcode, len);
		return;
	}

	text_poke_loc_init(&tp, addr, opcode, len, emulate);
	text_poke_bp_batch(&tp, 1);
}
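
/*
 * For instance, redirecting a patched call site could look roughly like
 * (sketch; 'addr' and 'target' are placeholders):
 *
 *	u8 insn[CALL_INSN_SIZE];
 *
 *	__text_gen_insn(insn, CALL_INSN_OPCODE, addr, target, CALL_INSN_SIZE);
 *	text_poke_bp(addr, insn, CALL_INSN_SIZE, NULL);
 *
 * which is essentially what the static_call and jump_label machinery does on
 * top of this file.
 */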