1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) "SMP alternatives: " fmt
4 #include <linux/module.h>
5 #include <linux/sched.h>
6 #include <linux/perf_event.h>
7 #include <linux/mutex.h>
8 #include <linux/list.h>
9 #include <linux/stringify.h>
10 #include <linux/highmem.h>
12 #include <linux/vmalloc.h>
13 #include <linux/memory.h>
14 #include <linux/stop_machine.h>
15 #include <linux/slab.h>
16 #include <linux/kdebug.h>
17 #include <linux/kprobes.h>
18 #include <linux/mmu_context.h>
19 #include <linux/bsearch.h>
20 #include <linux/sync_core.h>
21 #include <asm/text-patching.h>
22 #include <asm/alternative.h>
23 #include <asm/sections.h>
26 #include <asm/cacheflush.h>
27 #include <asm/tlbflush.h>
30 #include <asm/fixmap.h>
31 #include <asm/paravirt.h>
32 #include <asm/asm-prototypes.h>
35 int __read_mostly alternatives_patched;
37 EXPORT_SYMBOL_GPL(alternatives_patched);
39 #define MAX_PATCH_LEN (255-1)
44 #define DA_RETPOLINE 0x04
48 static unsigned int __initdata_or_module debug_alternative;
50 static int __init debug_alt(char *str)
52 if (str && *str == '=')
55 if (!str || kstrtouint(str, 0, &debug_alternative))
56 debug_alternative = DA_ALL;
60 __setup("debug-alternative", debug_alt);
62 static int noreplace_smp;
64 static int __init setup_noreplace_smp(char *str)
69 __setup("noreplace-smp", setup_noreplace_smp);
71 #define DPRINTK(type, fmt, args...) \
73 if (debug_alternative & DA_##type) \
74 printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args); \
77 #define DUMP_BYTES(type, buf, len, fmt, args...) \
79 if (unlikely(debug_alternative & DA_##type)) { \
85 printk(KERN_DEBUG pr_fmt(fmt), ##args); \
86 for (j = 0; j < (len) - 1; j++) \
87 printk(KERN_CONT "%02hhx ", buf[j]); \
88 printk(KERN_CONT "%02hhx\n", buf[j]); \
92 static const unsigned char x86nops[] =
109 const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
116 x86nops + 1 + 2 + 3 + 4,
117 x86nops + 1 + 2 + 3 + 4 + 5,
118 x86nops + 1 + 2 + 3 + 4 + 5 + 6,
119 x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
121 x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
122 x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9,
123 x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10,
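/*
 * Note: x86nops[] stores the 1..ASM_NOP_MAX byte NOP encodings back to back,
 * so each x86_nops[n] entry above simply skips past the shorter encodings and
 * points at the start of the n-byte NOP; x86_nops[0] is unused (there is no
 * zero-length NOP).
 */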
128 * Fill the buffer with a single effective instruction of size @len.
130 * In order not to issue an ORC stack depth tracking CFI entry (Call Frame Info)
131 * for every single-byte NOP, try to generate the maximally available NOP of
132 * size <= ASM_NOP_MAX such that only a single CFI entry is generated (vs one for
133 * each single-byte NOP). If @len to fill out is > ASM_NOP_MAX, pad with INT3 and
134 * *jump* over instead of executing long and daft NOPs.
136 static void __init_or_module add_nop(u8 *instr, unsigned int len)
138 u8 *target = instr + len;
143 if (len <= ASM_NOP_MAX) {
144 memcpy(instr, x86_nops[len], len);
149 __text_gen_insn(instr, JMP8_INSN_OPCODE, instr, target, JMP8_INSN_SIZE);
150 instr += JMP8_INSN_SIZE;
152 __text_gen_insn(instr, JMP32_INSN_OPCODE, instr, target, JMP32_INSN_SIZE);
153 instr += JMP32_INSN_SIZE;
156 for (;instr < target; instr++)
157 *instr = INT3_INSN_OPCODE;
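/*
 * For gaps larger than ASM_NOP_MAX the code above emits a JMP over the
 * remainder (JMP8 when the distance fits in a signed byte, JMP32 otherwise)
 * and fills the skipped bytes with INT3 so that any stray execution traps
 * instead of sliding through stale bytes.
 */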
160 extern s32 __retpoline_sites[], __retpoline_sites_end[];
161 extern s32 __return_sites[], __return_sites_end[];
162 extern s32 __cfi_sites[], __cfi_sites_end[];
163 extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
164 extern s32 __smp_locks[], __smp_locks_end[];
165 void text_poke_early(void *addr, const void *opcode, size_t len);
168 * Matches NOP and NOPL, not any of the other possible NOPs.
170 static bool insn_is_nop(struct insn *insn)
172 /* Anything NOP, but no REP NOP */
173 if (insn->opcode.bytes[0] == 0x90 &&
174 (!insn->prefixes.nbytes || insn->prefixes.bytes[0] != 0xF3))
178 if (insn->opcode.bytes[0] == 0x0F && insn->opcode.bytes[1] == 0x1F)
181 /* TODO: more nops */
187 * Find the offset of the first non-NOP instruction starting at @offset
188 * but no further than @len.
190 static int skip_nops(u8 *instr, int offset, int len)
194 for (; offset < len; offset += insn.length) {
195 if (insn_decode_kernel(&insn, &instr[offset]))
198 if (!insn_is_nop(&insn))
206 * Optimize a sequence of NOPs, possibly preceded by an unconditional jump
207 * to the end of the sequence, into a single NOP.
209 static bool __init_or_module
210 __optimize_nops(u8 *instr, size_t len, struct insn *insn, int *next, int *prev, int *target)
212 int i = *next - insn->length;
214 switch (insn->opcode.bytes[0]) {
215 case JMP8_INSN_OPCODE:
216 case JMP32_INSN_OPCODE:
218 *target = *next + insn->immediate.value;
222 if (insn_is_nop(insn)) {
225 *next = skip_nops(instr, *next, len);
226 if (*target && *next == *target)
229 add_nop(instr + nop, *next - nop);
230 DUMP_BYTES(ALT, instr, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, *next);
239 * "noinline" to cause control flow change and thus invalidate I$ and
240 * cause refetch after modification.
242 static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
244 int prev, target = 0;
246 for (int next, i = 0; i < len; i = next) {
249 if (insn_decode_kernel(&insn, &instr[i]))
252 next = i + insn.length;
254 __optimize_nops(instr, len, &insn, &next, &prev, &target);
258 static void __init_or_module noinline optimize_nops_inplace(u8 *instr, size_t len)
262 local_irq_save(flags);
263 optimize_nops(instr, len);
265 local_irq_restore(flags);
269 * In this context, "source" is where the instructions are placed in the
270 * section .altinstr_replacement, for example during kernel build by the
 * toolchain.
272 * "Destination" is where the instructions are being patched in by this
 * machinery.
275 * The source offset is:
277 * src_imm = target - src_next_ip (1)
279 * and the target offset is:
281 * dst_imm = target - dst_next_ip (2)
283 * so rework (1) as an expression for target like:
285 * target = src_imm + src_next_ip (1a)
287 * and substitute in (2) to get:
289 * dst_imm = (src_imm + src_next_ip) - dst_next_ip (3)
291 * Now, since the instruction stream is 'identical' at src and dst (it
292 * is being copied after all) it can be stated that:
294 * src_next_ip = src + ip_offset
295 * dst_next_ip = dst + ip_offset (4)
297 * Substitute (4) in (3) and observe ip_offset being cancelled out to
300 * dst_imm = src_imm + (src + ip_offset) - (dst + ip_offset)
301 * = src_imm + src - dst + ip_offset - ip_offset
302 * = src_imm + src - dst (5)
304 * IOW, only the relative displacement of the code block matters.
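/*
 * Worked example with made-up numbers: for a CALL copied from src = 0x1000
 * to dst = 0x2000 whose original immediate is src_imm = 0x80, (5) gives
 * dst_imm = 0x80 + 0x1000 - 0x2000 = -0xf80, i.e. every relative immediate
 * is simply adjusted by (src - dst).
 */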
307 #define apply_reloc_n(n_, p_, d_) \
309 s32 v = *(s##n_ *)(p_); \
311 BUG_ON((v >> 31) != (v >> (n_-1))); \
312 *(s##n_ *)(p_) = (s##n_)v; \
316 static __always_inline
317 void apply_reloc(int n, void *ptr, uintptr_t diff)
320 case 1: apply_reloc_n(8, ptr, diff); break;
321 case 2: apply_reloc_n(16, ptr, diff); break;
322 case 4: apply_reloc_n(32, ptr, diff); break;
327 static __always_inline
328 bool need_reloc(unsigned long offset, u8 *src, size_t src_len)
330 u8 *target = src + offset;
332 * If the target is inside the patched block, it's relative to the
333 * block itself and does not need relocation.
335 return (target < src || target > src + src_len);
338 static void __init_or_module noinline
339 apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
341 int prev, target = 0;
343 for (int next, i = 0; i < len; i = next) {
346 if (WARN_ON_ONCE(insn_decode_kernel(&insn, &buf[i])))
349 next = i + insn.length;
351 if (__optimize_nops(buf, len, &insn, &next, &prev, &target))
354 switch (insn.opcode.bytes[0]) {
356 if (insn.opcode.bytes[1] < 0x80 ||
357 insn.opcode.bytes[1] > 0x8f)
360 fallthrough; /* Jcc.d32 */
361 case 0x70 ... 0x7f: /* Jcc.d8 */
362 case JMP8_INSN_OPCODE:
363 case JMP32_INSN_OPCODE:
364 case CALL_INSN_OPCODE:
365 if (need_reloc(next + insn.immediate.value, src, src_len)) {
366 apply_reloc(insn.immediate.nbytes,
367 buf + i + insn_offset_immediate(&insn),
372 * Where possible, convert JMP.d32 into JMP.d8.
374 if (insn.opcode.bytes[0] == JMP32_INSN_OPCODE) {
375 s32 imm = insn.immediate.value;
377 imm += JMP32_INSN_SIZE - JMP8_INSN_SIZE;
378 if ((imm >> 31) == (imm >> 7)) {
379 buf[i+0] = JMP8_INSN_OPCODE;
382 memset(&buf[i+2], INT3_INSN_OPCODE, insn.length - 2);
388 if (insn_rip_relative(&insn)) {
389 if (need_reloc(next + insn.displacement.value, src, src_len)) {
390 apply_reloc(insn.displacement.nbytes,
391 buf + i + insn_offset_displacement(&insn),
398 /* Low-level backend functions usable from alternative code replacements. */
399 DEFINE_ASM_FUNC(nop_func, "", .entry.text);
400 EXPORT_SYMBOL_GPL(nop_func);
402 noinstr void BUG_func(void)
406 EXPORT_SYMBOL(BUG_func);
408 #define CALL_RIP_REL_OPCODE 0xff
409 #define CALL_RIP_REL_MODRM 0x15
412 * Rewrite the "call BUG_func" replacement to point to the target of the
413 * indirect pv_ops call "call *disp(%ip)".
415 static int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a)
417 void *target, *bug = &BUG_func;
420 if (a->replacementlen != 5 || insn_buff[0] != CALL_INSN_OPCODE) {
421 pr_err("ALT_FLAG_DIRECT_CALL set for a non-call replacement instruction\n");
425 if (a->instrlen != 6 ||
426 instr[0] != CALL_RIP_REL_OPCODE ||
427 instr[1] != CALL_RIP_REL_MODRM) {
428 pr_err("ALT_FLAG_DIRECT_CALL set for unrecognized indirect call\n");
432 /* Skip CALL_RIP_REL_OPCODE and CALL_RIP_REL_MODRM */
433 disp = *(s32 *)(instr + 2);
435 /* ff 15 00 00 00 00 call *0x0(%rip) */
436 /* target address is stored at "next instruction + disp". */
437 target = *(void **)(instr + a->instrlen + disp);
439 /* ff 15 00 00 00 00 call *0x0 */
440 /* target address is stored at disp. */
441 target = *(void **)disp;
446 /* (BUG_func - .) + (target - BUG_func) := target - . */
447 *(s32 *)(insn_buff + 1) += target - bug;
449 if (target == &nop_func)
456 * Replace instructions with better alternatives for this CPU type. This runs
457 * before SMP is initialized to avoid SMP problems with self modifying code.
458 * This implies that asymmetric systems where APs have fewer capabilities than
459 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
462 * Marked "noinline" to cause control flow change and thus insn cache
463 * to refetch changed I$ lines.
465 void __init_or_module noinline apply_alternatives(struct alt_instr *start,
466 struct alt_instr *end)
469 u8 *instr, *replacement;
470 u8 insn_buff[MAX_PATCH_LEN];
472 DPRINTK(ALT, "alt table %px, -> %px", start, end);
475 * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using
476 * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here.
477 * During the process, KASAN becomes confused seeing partial LA57
478 * conversion and triggers a false-positive out-of-bounds report.
480 * Disable KASAN until the patching is complete.
482 kasan_disable_current();
485 * The scan order should be from start to end. A later scanned
486 * alternative code can overwrite previously scanned alternative code.
487 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
490 * So be careful if you want to change the scan order to any other
493 for (a = start; a < end; a++) {
494 int insn_buff_sz = 0;
496 instr = (u8 *)&a->instr_offset + a->instr_offset;
497 replacement = (u8 *)&a->repl_offset + a->repl_offset;
498 BUG_ON(a->instrlen > sizeof(insn_buff));
499 BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
503 * - feature is present
504 * - feature not present but ALT_FLAG_NOT is set to mean,
505 * patch if feature is *NOT* present.
507 if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
508 optimize_nops_inplace(instr, a->instrlen);
512 DPRINTK(ALT, "feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d) flags: 0x%x",
515 instr, instr, a->instrlen,
516 replacement, a->replacementlen, a->flags);
518 memcpy(insn_buff, replacement, a->replacementlen);
519 insn_buff_sz = a->replacementlen;
521 if (a->flags & ALT_FLAG_DIRECT_CALL) {
522 insn_buff_sz = alt_replace_call(instr, insn_buff, a);
523 if (insn_buff_sz < 0)
527 for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
528 insn_buff[insn_buff_sz] = 0x90;
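/*
 * 0x90 is the single-byte NOP; the tail padding added above is later
 * collapsed into a larger NOP (or a jump over INT3s) by the optimize_nops()
 * logic invoked via apply_relocation() below.
 */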
530 apply_relocation(insn_buff, a->instrlen, instr, replacement, a->replacementlen);
532 DUMP_BYTES(ALT, instr, a->instrlen, "%px: old_insn: ", instr);
533 DUMP_BYTES(ALT, replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
534 DUMP_BYTES(ALT, insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
536 text_poke_early(instr, insn_buff, insn_buff_sz);
539 kasan_enable_current();
542 static inline bool is_jcc32(struct insn *insn)
544 /* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
545 return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
548 #if defined(CONFIG_RETPOLINE) && defined(CONFIG_OBJTOOL)
553 static int emit_indirect(int op, int reg, u8 *bytes)
559 case CALL_INSN_OPCODE:
560 modrm = 0x10; /* Reg = 2; CALL r/m */
563 case JMP32_INSN_OPCODE:
564 modrm = 0x20; /* Reg = 4; JMP r/m */
573 bytes[i++] = 0x41; /* REX.B prefix */
577 modrm |= 0xc0; /* Mod = 3 */
580 bytes[i++] = 0xff; /* opcode */
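/*
 * Illustrative encodings this produces: "ff d0" for CALL *%rax,
 * "ff e0" for JMP *%rax, and e.g. "41 ff e3" for JMP *%r11 where the
 * REX.B prefix selects the extended register.
 */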
586 static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
588 u8 op = insn->opcode.bytes[0];
592 * Clang does 'weird' Jcc __x86_indirect_thunk_r11 conditional
593 * tail-calls. Deal with them.
595 if (is_jcc32(insn)) {
597 op = insn->opcode.bytes[1];
601 if (insn->length == 6)
602 bytes[i++] = 0x2e; /* CS-prefix */
605 case CALL_INSN_OPCODE:
606 __text_gen_insn(bytes+i, op, addr+i,
607 __x86_indirect_call_thunk_array[reg],
612 case JMP32_INSN_OPCODE:
614 __text_gen_insn(bytes+i, op, addr+i,
615 __x86_indirect_jump_thunk_array[reg],
617 i += JMP32_INSN_SIZE;
621 WARN(1, "%pS %px %*ph\n", addr, addr, 6, addr);
625 WARN_ON_ONCE(i != insn->length);
631 * Rewrite the compiler generated retpoline thunk calls.
633 * For spectre_v2=off (!X86_FEATURE_RETPOLINE), rewrite them into immediate
634 * indirect instructions, avoiding the extra indirection.
636 * For example, convert:
638 * CALL __x86_indirect_thunk_\reg
 *
 * into:
 *
 *   CALL *%\reg
 *
644 * It also tries to inline spectre_v2=retpoline,lfence when size permits.
646 static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
648 retpoline_thunk_t *target;
652 target = addr + insn->length + insn->immediate.value;
653 reg = target - __x86_indirect_thunk_array;
655 if (WARN_ON_ONCE(reg & ~0xf))
658 /* If anyone ever does: CALL/JMP *%rsp, we're in deep trouble. */
661 if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
662 !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
663 if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
664 return emit_call_track_retpoline(addr, insn, reg, bytes);
669 op = insn->opcode.bytes[0];
674 * Jcc.d32 __x86_indirect_thunk_\reg
684 if (is_jcc32(insn)) {
685 cc = insn->opcode.bytes[1] & 0xf;
686 cc ^= 1; /* invert condition */
688 bytes[i++] = 0x70 + cc; /* Jcc.d8 */
689 bytes[i++] = insn->length - 2; /* sizeof(Jcc.d8) == 2 */
691 /* Continue as if: JMP.d32 __x86_indirect_thunk_\reg */
692 op = JMP32_INSN_OPCODE;
696 * For RETPOLINE_LFENCE: prepend the indirect CALL/JMP with an LFENCE.
698 if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
701 bytes[i++] = 0xe8; /* LFENCE */
704 ret = emit_indirect(op, reg, bytes + i);
710 * The compiler is supposed to EMIT an INT3 after every unconditional
711 * JMP instruction due to AMD BTC. However, if the compiler is too old
712 * or SLS isn't enabled, we still need an INT3 after indirect JMPs
715 if (op == JMP32_INSN_OPCODE && i < insn->length)
716 bytes[i++] = INT3_INSN_OPCODE;
718 for (; i < insn->length;)
719 bytes[i++] = BYTES_NOP1;
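/*
 * Illustrative before/after (bytes depend on the register): a 5 byte
 * "e8 xx xx xx xx    call __x86_indirect_thunk_rbx" becomes
 * "ff d3             call *%rbx", padded with single-byte NOPs (or an INT3
 * after a JMP) up to the original instruction length.
 */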
725 * Generated by 'objtool --retpoline'.
727 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
731 for (s = start; s < end; s++) {
732 void *addr = (void *)s + *s;
738 ret = insn_decode_kernel(&insn, addr);
739 if (WARN_ON_ONCE(ret < 0))
742 op1 = insn.opcode.bytes[0];
743 op2 = insn.opcode.bytes[1];
746 case CALL_INSN_OPCODE:
747 case JMP32_INSN_OPCODE:
750 case 0x0f: /* escape */
751 if (op2 >= 0x80 && op2 <= 0x8f)
759 DPRINTK(RETPOLINE, "retpoline at: %pS (%px) len: %d to: %pS",
760 addr, addr, insn.length,
761 addr + insn.length + insn.immediate.value);
763 len = patch_retpoline(addr, &insn, bytes);
764 if (len == insn.length) {
765 optimize_nops(bytes, len);
766 DUMP_BYTES(RETPOLINE, ((u8*)addr), len, "%px: orig: ", addr);
767 DUMP_BYTES(RETPOLINE, ((u8*)bytes), len, "%px: repl: ", addr);
768 text_poke_early(addr, bytes, len);
773 #ifdef CONFIG_RETHUNK
776 * Rewrite the compiler generated return thunk tail-calls.
778 * For example, convert:
780 * JMP __x86_return_thunk
 *
 * into:
 *
 *   RET
 */
786 static int patch_return(void *addr, struct insn *insn, u8 *bytes)
790 /* Patch the custom return thunks... */
791 if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
793 __text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
795 /* ... or patch them out if not needed. */
796 bytes[i++] = RET_INSN_OPCODE;
799 for (; i < insn->length;)
800 bytes[i++] = INT3_INSN_OPCODE;
804 void __init_or_module noinline apply_returns(s32 *start, s32 *end)
808 if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
809 static_call_force_reinit();
811 for (s = start; s < end; s++) {
812 void *dest = NULL, *addr = (void *)s + *s;
818 ret = insn_decode_kernel(&insn, addr);
819 if (WARN_ON_ONCE(ret < 0))
822 op = insn.opcode.bytes[0];
823 if (op == JMP32_INSN_OPCODE)
824 dest = addr + insn.length + insn.immediate.value;
826 if (__static_call_fixup(addr, op, dest) ||
827 WARN_ONCE(dest != &__x86_return_thunk,
828 "missing return thunk: %pS-%pS: %*ph",
829 addr, dest, 5, addr))
832 DPRINTK(RET, "return thunk at: %pS (%px) len: %d to: %pS",
833 addr, addr, insn.length,
834 addr + insn.length + insn.immediate.value);
836 len = patch_return(addr, &insn, bytes);
837 if (len == insn.length) {
838 DUMP_BYTES(RET, ((u8*)addr), len, "%px: orig: ", addr);
839 DUMP_BYTES(RET, ((u8*)bytes), len, "%px: repl: ", addr);
840 text_poke_early(addr, bytes, len);
845 void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
846 #endif /* CONFIG_RETHUNK */
848 #else /* !CONFIG_RETPOLINE || !CONFIG_OBJTOOL */
850 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
851 void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
853 #endif /* CONFIG_RETPOLINE && CONFIG_OBJTOOL */
855 #ifdef CONFIG_X86_KERNEL_IBT
857 static void poison_cfi(void *addr);
859 static void __init_or_module poison_endbr(void *addr, bool warn)
861 u32 endbr, poison = gen_endbr_poison();
863 if (WARN_ON_ONCE(get_kernel_nofault(endbr, addr)))
866 if (!is_endbr(endbr)) {
871 DPRINTK(ENDBR, "ENDBR at: %pS (%px)", addr, addr);
874 * When we have IBT, the lack of ENDBR will trigger #CP
876 DUMP_BYTES(ENDBR, ((u8*)addr), 4, "%px: orig: ", addr);
877 DUMP_BYTES(ENDBR, ((u8*)&poison), 4, "%px: repl: ", addr);
878 text_poke_early(addr, &poison, 4);
882 * Generated by: objtool --ibt
884 * Seal the functions for indirect calls by clobbering the ENDBR instructions
885 * and the kCFI hash value.
887 void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end)
891 for (s = start; s < end; s++) {
892 void *addr = (void *)s + *s;
894 poison_endbr(addr, true);
895 if (IS_ENABLED(CONFIG_FINEIBT))
896 poison_cfi(addr - 16);
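/*
 * The CFI preamble (__cfi_\func) occupies the 16 bytes immediately in front
 * of the function entry point, hence the addr - 16 above; see also the
 * fineibt_preamble_size check in __apply_fineibt().
 */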
902 void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { }
904 #endif /* CONFIG_X86_KERNEL_IBT */
906 #ifdef CONFIG_FINEIBT
907 #define __CFI_DEFAULT CFI_DEFAULT
908 #elif defined(CONFIG_CFI_CLANG)
909 #define __CFI_DEFAULT CFI_KCFI
911 #define __CFI_DEFAULT CFI_OFF
914 enum cfi_mode cfi_mode __ro_after_init = __CFI_DEFAULT;
916 #ifdef CONFIG_CFI_CLANG
919 /* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
920 extern unsigned int __bpf_prog_runX(const void *ctx,
921 const struct bpf_insn *insn);
924 * Force a reference to the external symbol so the compiler generates
927 __ADDRESSABLE(__bpf_prog_runX);
929 /* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
931 " .pushsection .data..ro_after_init,\"aw\",@progbits \n"
932 " .type cfi_bpf_hash,@object \n"
933 " .globl cfi_bpf_hash \n"
934 " .p2align 2, 0x0 \n"
936 " .long __kcfi_typeid___bpf_prog_runX \n"
937 " .size cfi_bpf_hash, 4 \n"
941 /* Must match bpf_callback_t */
942 extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
944 __ADDRESSABLE(__bpf_callback_fn);
946 /* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
948 " .pushsection .data..ro_after_init,\"aw\",@progbits \n"
949 " .type cfi_bpf_subprog_hash,@object \n"
950 " .globl cfi_bpf_subprog_hash \n"
951 " .p2align 2, 0x0 \n"
952 "cfi_bpf_subprog_hash: \n"
953 " .long __kcfi_typeid___bpf_callback_fn \n"
954 " .size cfi_bpf_subprog_hash, 4 \n"
958 u32 cfi_get_func_hash(void *func)
962 func -= cfi_get_offset();
974 if (get_kernel_nofault(hash, func))
981 #ifdef CONFIG_FINEIBT
983 static bool cfi_rand __ro_after_init = true;
984 static u32 cfi_seed __ro_after_init;
987 * Re-hash the CFI hash with a boot-time seed while making sure the result is
988 * not a valid ENDBR instruction.
990 static u32 cfi_rehash(u32 hash)
993 while (unlikely(is_endbr(hash) || is_endbr(-hash))) {
1002 static __init int cfi_parse_cmdline(char *str)
1008 char *next = strchr(str, ',');
1014 if (!strcmp(str, "auto")) {
1015 cfi_mode = CFI_DEFAULT;
1016 } else if (!strcmp(str, "off")) {
1019 } else if (!strcmp(str, "kcfi")) {
1020 cfi_mode = CFI_KCFI;
1021 } else if (!strcmp(str, "fineibt")) {
1022 cfi_mode = CFI_FINEIBT;
1023 } else if (!strcmp(str, "norand")) {
1026 pr_err("Ignoring unknown cfi option (%s).", str);
1034 early_param("cfi", cfi_parse_cmdline);
 *   kCFI                                          FineIBT
 *
 * __cfi_\func:                                  __cfi_\func:
 *      movl   $0x12345678,%eax         // 5          endbr64                      // 4
 *      nop                                           subl   $0x12345678,%r10d     // 7
 *
 * caller:                                       caller:
 *      movl   $(-0x12345678),%r10d     // 6          movl   $0x12345678,%r10d     // 6
 *      addl   $-15(%r11),%r10d         // 4          sub    $16,%r11              // 4
 *      je     1f                       // 2          nop4                         // 4
 *
 * 1:   call   __x86_indirect_thunk_r11 // 5          call   *%r11; nop2;          // 5
1063 asm( ".pushsection .rodata \n"
1064 "fineibt_preamble_start: \n"
1066 " subl $0x12345678, %r10d \n"
1067 " je fineibt_preamble_end \n"
1070 "fineibt_preamble_end: \n"
1074 extern u8 fineibt_preamble_start[];
1075 extern u8 fineibt_preamble_end[];
1077 #define fineibt_preamble_size (fineibt_preamble_end - fineibt_preamble_start)
1078 #define fineibt_preamble_hash 7
1080 asm( ".pushsection .rodata \n"
1081 "fineibt_caller_start: \n"
1082 " movl $0x12345678, %r10d \n"
1085 "fineibt_caller_end: \n"
1089 extern u8 fineibt_caller_start[];
1090 extern u8 fineibt_caller_end[];
1092 #define fineibt_caller_size (fineibt_caller_end - fineibt_caller_start)
1093 #define fineibt_caller_hash 2
1095 #define fineibt_caller_jmp (fineibt_caller_size - 2)
1097 static u32 decode_preamble_hash(void *addr)
1101 /* b8 78 56 34 12 mov $0x12345678,%eax */
1103 return *(u32 *)(addr + 1);
1105 return 0; /* invalid hash value */
1108 static u32 decode_caller_hash(void *addr)
1112 /* 41 ba 78 56 34 12 mov $0x12345678,%r10d */
1113 if (p[0] == 0x41 && p[1] == 0xba)
1114 return -*(u32 *)(addr + 2);
1116 /* e8 0c 78 56 34 12 jmp.d8 +12 */
1117 if (p[0] == JMP8_INSN_OPCODE && p[1] == fineibt_caller_jmp)
1118 return -*(u32 *)(addr + 2);
1120 return 0; /* invalid hash value */
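/*
 * Callers embed the negated hash (see the movl $(-0x12345678) in the diagram
 * above), so the negation here recovers the positive value; a return of 0
 * doubles as the "no kCFI hash here" marker.
 */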
1123 /* .retpoline_sites */
1124 static int cfi_disable_callers(s32 *start, s32 *end)
1127 * Disable kCFI by patching in a JMP.d8; this leaves the hash immediate
1128 * intact for later usage. Also see decode_caller_hash() and
1129 * cfi_rewrite_callers().
1131 const u8 jmp[] = { JMP8_INSN_OPCODE, fineibt_caller_jmp };
1134 for (s = start; s < end; s++) {
1135 void *addr = (void *)s + *s;
1138 addr -= fineibt_caller_size;
1139 hash = decode_caller_hash(addr);
1140 if (!hash) /* nocfi callers */
1143 text_poke_early(addr, jmp, 2);
1149 static int cfi_enable_callers(s32 *start, s32 *end)
1152 * Re-enable kCFI, undo what cfi_disable_callers() did.
1154 const u8 mov[] = { 0x41, 0xba };
1157 for (s = start; s < end; s++) {
1158 void *addr = (void *)s + *s;
1161 addr -= fineibt_caller_size;
1162 hash = decode_caller_hash(addr);
1163 if (!hash) /* nocfi callers */
1166 text_poke_early(addr, mov, 2);
1173 static int cfi_rand_preamble(s32 *start, s32 *end)
1177 for (s = start; s < end; s++) {
1178 void *addr = (void *)s + *s;
1181 hash = decode_preamble_hash(addr);
1182 if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
1183 addr, addr, 5, addr))
1186 hash = cfi_rehash(hash);
1187 text_poke_early(addr + 1, &hash, 4);
1193 static int cfi_rewrite_preamble(s32 *start, s32 *end)
1197 for (s = start; s < end; s++) {
1198 void *addr = (void *)s + *s;
1201 hash = decode_preamble_hash(addr);
1202 if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
1203 addr, addr, 5, addr))
1206 text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size);
1207 WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678);
1208 text_poke_early(addr + fineibt_preamble_hash, &hash, 4);
1214 static void cfi_rewrite_endbr(s32 *start, s32 *end)
1218 for (s = start; s < end; s++) {
1219 void *addr = (void *)s + *s;
1221 poison_endbr(addr+16, false);
1225 /* .retpoline_sites */
1226 static int cfi_rand_callers(s32 *start, s32 *end)
1230 for (s = start; s < end; s++) {
1231 void *addr = (void *)s + *s;
1234 addr -= fineibt_caller_size;
1235 hash = decode_caller_hash(addr);
1237 hash = -cfi_rehash(hash);
1238 text_poke_early(addr + 2, &hash, 4);
1245 static int cfi_rewrite_callers(s32 *start, s32 *end)
1249 for (s = start; s < end; s++) {
1250 void *addr = (void *)s + *s;
1253 addr -= fineibt_caller_size;
1254 hash = decode_caller_hash(addr);
1256 text_poke_early(addr, fineibt_caller_start, fineibt_caller_size);
1257 WARN_ON(*(u32 *)(addr + fineibt_caller_hash) != 0x12345678);
1258 text_poke_early(addr + fineibt_caller_hash, &hash, 4);
1260 /* rely on apply_retpolines() */
1266 static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
1267 s32 *start_cfi, s32 *end_cfi, bool builtin)
1271 if (WARN_ONCE(fineibt_preamble_size != 16,
1272 "FineIBT preamble wrong size: %ld", fineibt_preamble_size))
1275 if (cfi_mode == CFI_DEFAULT) {
1276 cfi_mode = CFI_KCFI;
1277 if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
1278 cfi_mode = CFI_FINEIBT;
1282 * Rewrite the callers to not use the __cfi_ stubs, such that we might
1283 * rewrite them. This disables all CFI. If this succeeds but any of the
1284 * later stages fails, we're without CFI.
1286 ret = cfi_disable_callers(start_retpoline, end_retpoline);
1292 cfi_seed = get_random_u32();
1293 cfi_bpf_hash = cfi_rehash(cfi_bpf_hash);
1294 cfi_bpf_subprog_hash = cfi_rehash(cfi_bpf_subprog_hash);
1297 ret = cfi_rand_preamble(start_cfi, end_cfi);
1301 ret = cfi_rand_callers(start_retpoline, end_retpoline);
1309 pr_info("Disabling CFI\n");
1313 ret = cfi_enable_callers(start_retpoline, end_retpoline);
1318 pr_info("Using kCFI\n");
1322 /* place the FineIBT preamble at func()-16 */
1323 ret = cfi_rewrite_preamble(start_cfi, end_cfi);
1327 /* rewrite the callers to target func()-16 */
1328 ret = cfi_rewrite_callers(start_retpoline, end_retpoline);
1332 /* now that nobody targets func()+0, remove ENDBR there */
1333 cfi_rewrite_endbr(start_cfi, end_cfi);
1336 pr_info("Using FineIBT CFI\n");
1344 pr_err("Something went horribly wrong trying to rewrite the CFI implementation.\n");
1347 static inline void poison_hash(void *addr)
1352 static void poison_cfi(void *addr)
1364 poison_endbr(addr, false);
1365 poison_hash(addr + fineibt_preamble_hash);
1374 poison_hash(addr + 1);
1384 static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
1385 s32 *start_cfi, s32 *end_cfi, bool builtin)
1389 #ifdef CONFIG_X86_KERNEL_IBT
1390 static void poison_cfi(void *addr) { }
1395 void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
1396 s32 *start_cfi, s32 *end_cfi)
1398 return __apply_fineibt(start_retpoline, end_retpoline,
1400 /* .builtin = */ false);
1404 static void alternatives_smp_lock(const s32 *start, const s32 *end,
1405 u8 *text, u8 *text_end)
1409 for (poff = start; poff < end; poff++) {
1410 u8 *ptr = (u8 *)poff + *poff;
1412 if (!*poff || ptr < text || ptr >= text_end)
1414 /* turn DS segment override prefix into lock prefix */
1416 text_poke(ptr, ((unsigned char []){0xf0}), 1);
1420 static void alternatives_smp_unlock(const s32 *start, const s32 *end,
1421 u8 *text, u8 *text_end)
1425 for (poff = start; poff < end; poff++) {
1426 u8 *ptr = (u8 *)poff + *poff;
1428 if (!*poff || ptr < text || ptr >= text_end)
1430 /* turn lock prefix into DS segment override prefix */
1432 text_poke(ptr, ((unsigned char []){0x3E}), 1);
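/*
 * The 0x3E DS override is architecturally a no-op on these instructions, so
 * on UP the same bytes execute without paying for the LOCK prefix, while
 * alternatives_smp_lock() can later restore 0xF0 in place.
 */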
1436 struct smp_alt_module {
1437 /* the module owning these lock sites, and its name */
1441 /* ptrs to lock prefixes */
1443 const s32 *locks_end;
1445 /* .text segment, needed to avoid patching init code ;) */
1449 struct list_head next;
1451 static LIST_HEAD(smp_alt_modules);
1452 static bool uniproc_patched = false; /* protected by text_mutex */
1454 void __init_or_module alternatives_smp_module_add(struct module *mod,
1456 void *locks, void *locks_end,
1457 void *text, void *text_end)
1459 struct smp_alt_module *smp;
1461 mutex_lock(&text_mutex);
1462 if (!uniproc_patched)
1465 if (num_possible_cpus() == 1)
1466 /* Don't bother remembering, we'll never have to undo it. */
1469 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
1471 /* we'll run the (safe but slow) SMP code then ... */
1477 smp->locks_end = locks_end;
1479 smp->text_end = text_end;
1480 DPRINTK(SMP, "locks %p -> %p, text %p -> %p, name %s\n",
1481 smp->locks, smp->locks_end,
1482 smp->text, smp->text_end, smp->name);
1484 list_add_tail(&smp->next, &smp_alt_modules);
1486 alternatives_smp_unlock(locks, locks_end, text, text_end);
1488 mutex_unlock(&text_mutex);
1491 void __init_or_module alternatives_smp_module_del(struct module *mod)
1493 struct smp_alt_module *item;
1495 mutex_lock(&text_mutex);
1496 list_for_each_entry(item, &smp_alt_modules, next) {
1497 if (mod != item->mod)
1499 list_del(&item->next);
1503 mutex_unlock(&text_mutex);
1506 void alternatives_enable_smp(void)
1508 struct smp_alt_module *mod;
1510 /* Why bother if there are no other CPUs? */
1511 BUG_ON(num_possible_cpus() == 1);
1513 mutex_lock(&text_mutex);
1515 if (uniproc_patched) {
1516 pr_info("switching to SMP code\n");
1517 BUG_ON(num_online_cpus() != 1);
1518 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
1519 clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
1520 list_for_each_entry(mod, &smp_alt_modules, next)
1521 alternatives_smp_lock(mod->locks, mod->locks_end,
1522 mod->text, mod->text_end);
1523 uniproc_patched = false;
1525 mutex_unlock(&text_mutex);
1529 * Return 1 if the address range is reserved for SMP-alternatives.
1530 * Must hold text_mutex.
1532 int alternatives_text_reserved(void *start, void *end)
1534 struct smp_alt_module *mod;
1536 u8 *text_start = start;
1539 lockdep_assert_held(&text_mutex);
1541 list_for_each_entry(mod, &smp_alt_modules, next) {
1542 if (mod->text > text_end || mod->text_end < text_start)
1544 for (poff = mod->locks; poff < mod->locks_end; poff++) {
1545 const u8 *ptr = (const u8 *)poff + *poff;
1547 if (text_start <= ptr && text_end > ptr)
1554 #endif /* CONFIG_SMP */
1557 * Self-test for the INT3 based CALL emulation code.
1559 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
1560 * properly and that there is a stack gap between the INT3 frame and the
1561 * previous context. Without this gap doing a virtual PUSH on the interrupted
1562 * stack would corrupt the INT3 IRET frame.
1564 * See entry_{32,64}.S for more details.
1568 * We define the int3_magic() function in assembly to control the calling
1569 * convention such that we can 'call' it from assembly.
1572 extern void int3_magic(unsigned int *ptr); /* defined in asm */
1575 " .pushsection .init.text, \"ax\", @progbits\n"
1576 " .type int3_magic, @function\n"
1579 " movl $1, (%" _ASM_ARG1 ")\n"
1581 " .size int3_magic, .-int3_magic\n"
1585 extern void int3_selftest_ip(void); /* defined in asm below */
1588 int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
1590 unsigned long selftest = (unsigned long)&int3_selftest_ip;
1591 struct die_args *args = data;
1592 struct pt_regs *regs = args->regs;
1594 OPTIMIZER_HIDE_VAR(selftest);
1596 if (!regs || user_mode(regs))
1599 if (val != DIE_INT3)
1602 if (regs->ip - INT3_INSN_SIZE != selftest)
1605 int3_emulate_call(regs, (unsigned long)&int3_magic);
1609 /* Must be noinline to ensure uniqueness of int3_selftest_ip. */
1610 static noinline void __init int3_selftest(void)
1612 static __initdata struct notifier_block int3_exception_nb = {
1613 .notifier_call = int3_exception_notify,
1614 .priority = INT_MAX-1, /* last */
1616 unsigned int val = 0;
1618 BUG_ON(register_die_notifier(&int3_exception_nb));
1621 * Basically: int3_magic(&val); but really complicated :-)
1623 * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb
1624 * notifier above will emulate CALL for us.
1626 asm volatile ("int3_selftest_ip:\n\t"
1628 " int3; nop; nop; nop; nop\n\t"
1629 : ASM_CALL_CONSTRAINT
1630 : __ASM_SEL_RAW(a, D) (&val)
1635 unregister_die_notifier(&int3_exception_nb);
1638 static __initdata int __alt_reloc_selftest_addr;
1640 extern void __init __alt_reloc_selftest(void *arg);
1641 __visible noinline void __init __alt_reloc_selftest(void *arg)
1643 WARN_ON(arg != &__alt_reloc_selftest_addr);
1646 static noinline void __init alt_reloc_selftest(void)
1649 * Tests apply_relocation().
1651 * This has a relative immediate (CALL) in a place other than the first
1652 * instruction and additionally on x86_64 we get a RIP-relative LEA:
1654 * lea 0x0(%rip),%rdi # 5d0: R_X86_64_PC32 .init.data+0x5566c
1655 * call +0 # 5d5: R_X86_64_PLT32 __alt_reloc_selftest-0x4
1657 * Getting this wrong will either crash and burn or tickle the WARN
1660 asm_inline volatile (
1661 ALTERNATIVE("", "lea %[mem], %%" _ASM_ARG1 "; call __alt_reloc_selftest;", X86_FEATURE_ALWAYS)
1663 : [mem] "m" (__alt_reloc_selftest_addr)
1668 void __init alternative_instructions(void)
1673 * The patching is not fully atomic, so try to avoid local
1674 * interruptions that might execute the code about to be patched.
1675 * Other CPUs are not running.
1680 * Don't stop machine check exceptions while patching.
1681 * MCEs only happen when something got corrupted and in this
1682 * case we must do something about the corruption.
1683 * Ignoring it is worse than an unlikely patching race.
1684 * Also machine checks tend to be broadcast and if one CPU
1685 * goes into machine check the others follow quickly, so we don't
1686 * expect a machine check to cause undue problems during code
1691 * Make sure to set (artificial) features depending on used paravirt
1692 * functions which can later influence alternative patching.
1696 __apply_fineibt(__retpoline_sites, __retpoline_sites_end,
1697 __cfi_sites, __cfi_sites_end, true);
1700 * Rewrite the retpolines, must be done before alternatives since
1701 * those can rewrite the retpoline thunks.
1703 apply_retpolines(__retpoline_sites, __retpoline_sites_end);
1704 apply_returns(__return_sites, __return_sites_end);
1706 apply_alternatives(__alt_instructions, __alt_instructions_end);
1709 * Now all calls are established. Apply the call thunks if
1712 callthunks_patch_builtin_calls();
1715 * Seal all functions that do not have their address taken.
1717 apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
1720 /* Patch to UP if other cpus not imminent. */
1721 if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
1722 uniproc_patched = true;
1723 alternatives_smp_module_add(NULL, "core kernel",
1724 __smp_locks, __smp_locks_end,
1728 if (!uniproc_patched || num_possible_cpus() == 1) {
1729 free_init_pages("SMP alternatives",
1730 (unsigned long)__smp_locks,
1731 (unsigned long)__smp_locks_end);
1736 alternatives_patched = 1;
1738 alt_reloc_selftest();
1742 * text_poke_early - Update instructions on a live kernel at boot time
1743 * @addr: address to modify
1744 * @opcode: source of the copy
1745 * @len: length to copy
1747 * When you use this code to patch more than one byte of an instruction
1748 * you need to make sure that other CPUs cannot execute this code in parallel.
1749 * Also no thread must be currently preempted in the middle of these
1750 * instructions. And on the local CPU you need to be protected against NMI or
1751 * MCE handlers seeing an inconsistent instruction while you patch.
1753 void __init_or_module text_poke_early(void *addr, const void *opcode,
1756 unsigned long flags;
1758 if (boot_cpu_has(X86_FEATURE_NX) &&
1759 is_module_text_address((unsigned long)addr)) {
1761 * Modules text is marked initially as non-executable, so the
1762 * code cannot be running and speculative code-fetches are
1763 * prevented. Just change the code.
1765 memcpy(addr, opcode, len);
1767 local_irq_save(flags);
1768 memcpy(addr, opcode, len);
1770 local_irq_restore(flags);
1773 * Could also do a CLFLUSH here to speed up CPU recovery; but
1774 * that causes hangs on some VIA CPUs.
1780 struct mm_struct *mm;
1784 * Using a temporary mm makes it possible to set temporary mappings not accessible
1785 * by other CPUs. Such mappings are needed to perform sensitive memory writes
1786 * that override the kernel memory protections (e.g., W^X), without exposing the
1787 * temporary page-table mappings that are required for these write operations to
1788 * other CPUs. Using a temporary mm also avoids TLB shootdowns when the
1789 * mapping is torn down.
1791 * Context: The temporary mm needs to be used exclusively by a single core. To
1792 * harden security IRQs must be disabled while the temporary mm is
1793 * loaded, thereby preventing interrupt handler bugs from overriding
1794 * the kernel memory protection.
1796 static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
1798 temp_mm_state_t temp_state;
1800 lockdep_assert_irqs_disabled();
1803 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
1804 * with a stale address space WITHOUT being in lazy mode after
1805 * restoring the previous mm.
1807 if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
1808 leave_mm(smp_processor_id());
1810 temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
1811 switch_mm_irqs_off(NULL, mm, current);
1814 * If breakpoints are enabled, disable them while the temporary mm is
1815 * used. Userspace might set up watchpoints on addresses that are used
1816 * in the temporary mm, which would lead to wrong signals being sent or
1819 * Note that breakpoints are not disabled selectively, which also causes
1820 * kernel breakpoints (e.g., perf's) to be disabled. This might be
1821 * undesirable, but still seems reasonable as the code that runs in the
1822 * temporary mm should be short.
1824 if (hw_breakpoint_active())
1825 hw_breakpoint_disable();
1830 static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
1832 lockdep_assert_irqs_disabled();
1833 switch_mm_irqs_off(NULL, prev_state.mm, current);
1836 * Restore the breakpoints if they were disabled before the temporary mm
1839 if (hw_breakpoint_active())
1840 hw_breakpoint_restore();
1843 __ro_after_init struct mm_struct *poking_mm;
1844 __ro_after_init unsigned long poking_addr;
1846 static void text_poke_memcpy(void *dst, const void *src, size_t len)
1848 memcpy(dst, src, len);
1851 static void text_poke_memset(void *dst, const void *src, size_t len)
1853 int c = *(const int *)src;
1855 memset(dst, c, len);
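/*
 * Both helpers above match the text_poke_f signature below and are handed to
 * __text_poke() as the copy strategy (memcpy for text_poke(), memset for
 * text_poke_set()).
 */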
1858 typedef void text_poke_f(void *dst, const void *src, size_t len);
1860 static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t len)
1862 bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
1863 struct page *pages[2] = {NULL};
1864 temp_mm_state_t prev;
1865 unsigned long flags;
1871 * While boot memory allocator is running we cannot use struct pages as
1872 * they are not yet initialized. There is no way to recover.
1874 BUG_ON(!after_bootmem);
1876 if (!core_kernel_text((unsigned long)addr)) {
1877 pages[0] = vmalloc_to_page(addr);
1878 if (cross_page_boundary)
1879 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
1881 pages[0] = virt_to_page(addr);
1882 WARN_ON(!PageReserved(pages[0]));
1883 if (cross_page_boundary)
1884 pages[1] = virt_to_page(addr + PAGE_SIZE);
1887 * If something went wrong, crash and burn since recovery paths are not
1890 BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
1893 * Map the page without the global bit, as TLB flushing is done with
1894 * flush_tlb_mm_range(), which is intended for non-global PTEs.
1896 pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
1899 * The lock is not really needed, but it lets us avoid open-coding.
1901 ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
1904 * This must not fail; preallocated in poking_init().
1908 local_irq_save(flags);
1910 pte = mk_pte(pages[0], pgprot);
1911 set_pte_at(poking_mm, poking_addr, ptep, pte);
1913 if (cross_page_boundary) {
1914 pte = mk_pte(pages[1], pgprot);
1915 set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
1919 * Loading the temporary mm behaves as a compiler barrier, which
1920 * guarantees that the PTE will be set at the time memcpy() is done.
1922 prev = use_temporary_mm(poking_mm);
1924 kasan_disable_current();
1925 func((u8 *)poking_addr + offset_in_page(addr), src, len);
1926 kasan_enable_current();
1929 * Ensure that the PTE is only cleared after the instructions of memcpy
1930 * were issued by using a compiler barrier.
1934 pte_clear(poking_mm, poking_addr, ptep);
1935 if (cross_page_boundary)
1936 pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
1939 * Loading the previous page-table hierarchy requires a serializing
1940 * instruction that already allows the core to see the updated version.
1941 * Xen-PV is assumed to serialize execution in a similar manner.
1943 unuse_temporary_mm(prev);
1946 * Flushing the TLB might involve IPIs, which would require enabled
1947 * IRQs, but that is not needed when the mm is not in use, as is the case at this point.
1949 flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
1950 (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
1953 if (func == text_poke_memcpy) {
1955 * If the text does not match what we just wrote then something is
1956 * fundamentally screwy; there's nothing we can really do about that.
1958 BUG_ON(memcmp(addr, src, len));
1961 local_irq_restore(flags);
1962 pte_unmap_unlock(ptep, ptl);
1967 * text_poke - Update instructions on a live kernel
1968 * @addr: address to modify
1969 * @opcode: source of the copy
1970 * @len: length to copy
1972 * Only atomic text poke/set should be allowed when not doing early patching.
1973 * It means the size must be writable atomically and the address must be aligned
1974 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
1977 * Note that the caller must ensure that if the modified code is part of a
1978 * module, the module would not be removed during poking. This can be achieved
1979 * by registering a module notifier, and ordering module removal and patching
1982 void *text_poke(void *addr, const void *opcode, size_t len)
1984 lockdep_assert_held(&text_mutex);
1986 return __text_poke(text_poke_memcpy, addr, opcode, len);
1990 * text_poke_kgdb - Update instructions on a live kernel by kgdb
1991 * @addr: address to modify
1992 * @opcode: source of the copy
1993 * @len: length to copy
1995 * Only atomic text poke/set should be allowed when not doing early patching.
1996 * It means the size must be writable atomically and the address must be aligned
1997 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
2000 * Context: should only be used by kgdb, which ensures no other core is running,
2001 * despite the fact it does not hold the text_mutex.
2003 void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
2005 return __text_poke(text_poke_memcpy, addr, opcode, len);
2008 void *text_poke_copy_locked(void *addr, const void *opcode, size_t len,
2011 unsigned long start = (unsigned long)addr;
2014 if (WARN_ON_ONCE(!core_ok && core_kernel_text(start)))
2017 while (patched < len) {
2018 unsigned long ptr = start + patched;
2021 s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
2023 __text_poke(text_poke_memcpy, (void *)ptr, opcode + patched, s);
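/*
 * Each __text_poke() invocation can map at most two pages (the second one
 * only to cover a write crossing a page boundary), so the copy above is
 * chunked to stay within PAGE_SIZE * 2 per call.
 */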
2030 * text_poke_copy - Copy instructions into (an unused part of) RX memory
2031 * @addr: address to modify
2032 * @opcode: source of the copy
2033 * @len: length to copy, could be more than 2x PAGE_SIZE
2035 * Not safe against concurrent execution; useful for JITs to dump
2036 * new code blocks into unused regions of RX memory. Can be used in
2037 * conjunction with synchronize_rcu_tasks() to wait for existing
2038 * execution to quiesce after having made sure no existing function
2039 * pointers are live.
2041 void *text_poke_copy(void *addr, const void *opcode, size_t len)
2043 mutex_lock(&text_mutex);
2044 addr = text_poke_copy_locked(addr, opcode, len, false);
2045 mutex_unlock(&text_mutex);
2050 * text_poke_set - memset into (an unused part of) RX memory
2051 * @addr: address to modify
2052 * @c: the byte to fill the area with
2053 * @len: length to copy, could be more than 2x PAGE_SIZE
2055 * This is useful to overwrite unused regions of RX memory with illegal
 * instructions.
2058 void *text_poke_set(void *addr, int c, size_t len)
2060 unsigned long start = (unsigned long)addr;
2063 if (WARN_ON_ONCE(core_kernel_text(start)))
2066 mutex_lock(&text_mutex);
2067 while (patched < len) {
2068 unsigned long ptr = start + patched;
2071 s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
2073 __text_poke(text_poke_memset, (void *)ptr, (void *)&c, s);
2076 mutex_unlock(&text_mutex);
2080 static void do_sync_core(void *info)
2085 void text_poke_sync(void)
2087 on_each_cpu(do_sync_core, NULL, 1);
2091 * NOTE: crazy scheme to allow patching Jcc.d32 but not increase the size of
2092 * this thing. When len == 6 everything is prefixed with 0x0f and we map
2093 * opcode to Jcc.d8, using len to distinguish.
2095 struct text_poke_loc {
2096 /* addr := _stext + rel_addr */
2101 const u8 text[POKE_MAX_OPCODE_SIZE];
2102 /* see text_poke_bp_batch() */
2106 struct bp_patching_desc {
2107 struct text_poke_loc *vec;
2112 static struct bp_patching_desc bp_desc;
2114 static __always_inline
2115 struct bp_patching_desc *try_get_desc(void)
2117 struct bp_patching_desc *desc = &bp_desc;
2119 if (!raw_atomic_inc_not_zero(&desc->refs))
2125 static __always_inline void put_desc(void)
2127 struct bp_patching_desc *desc = &bp_desc;
2129 smp_mb__before_atomic();
2130 raw_atomic_dec(&desc->refs);
2133 static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
2135 return _stext + tp->rel_addr;
2138 static __always_inline int patch_cmp(const void *key, const void *elt)
2140 struct text_poke_loc *tp = (struct text_poke_loc *) elt;
2142 if (key < text_poke_addr(tp))
2144 if (key > text_poke_addr(tp))
2149 noinstr int poke_int3_handler(struct pt_regs *regs)
2151 struct bp_patching_desc *desc;
2152 struct text_poke_loc *tp;
2156 if (user_mode(regs))
2160 * Having observed our INT3 instruction, we now must observe
2161 * bp_desc with non-zero refcount:
 *      bp_desc.refs = 1                INT3
 *      WMB                             RMB
 *      write INT3                      if (bp_desc.refs != 0)
2169 desc = try_get_desc();
2174 * Discount the INT3. See text_poke_bp_batch().
2176 ip = (void *) regs->ip - INT3_INSN_SIZE;
2179 * Skip the binary search if there is a single member in the vector.
2181 if (unlikely(desc->nr_entries > 1)) {
2182 tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
2183 sizeof(struct text_poke_loc),
2189 if (text_poke_addr(tp) != ip)
2195 switch (tp->opcode) {
2196 case INT3_INSN_OPCODE:
2198 * Someone poked an explicit INT3, they'll want to handle it,
2203 case RET_INSN_OPCODE:
2204 int3_emulate_ret(regs);
2207 case CALL_INSN_OPCODE:
2208 int3_emulate_call(regs, (long)ip + tp->disp);
2211 case JMP32_INSN_OPCODE:
2212 case JMP8_INSN_OPCODE:
2213 int3_emulate_jmp(regs, (long)ip + tp->disp);
2216 case 0x70 ... 0x7f: /* Jcc */
2217 int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
2231 #define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
2232 static struct text_poke_loc tp_vec[TP_VEC_MAX];
2233 static int tp_vec_nr;
2236 * text_poke_bp_batch() -- update instructions on live kernel on SMP
2237 * @tp: vector of instructions to patch
2238 * @nr_entries: number of entries in the vector
2240 * Modify multi-byte instruction by using int3 breakpoint on SMP.
2241 * We completely avoid stop_machine() here, and achieve the
2242 * synchronization using int3 breakpoint.
2244 * The way it is done:
2245 * - For each entry in the vector:
2246 * - add a int3 trap to the address that will be patched
2248 * - For each entry in the vector:
2249 * - update all but the first byte of the patched range
2251 * - For each entry in the vector:
2252 * - replace the first byte (int3) by the first byte of
 *   the replacing opcode
2256 static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
2258 unsigned char int3 = INT3_INSN_OPCODE;
2262 lockdep_assert_held(&text_mutex);
2265 bp_desc.nr_entries = nr_entries;
2268 * Corresponds to the implicit memory barrier in try_get_desc() to
2269 * ensure reading a non-zero refcount provides up to date bp_desc data.
2271 atomic_set_release(&bp_desc.refs, 1);
2274 * Function tracing can enable thousands of places that need to be
2275 * updated. This can take quite some time, and with full kernel debugging
2276 * enabled, this could cause the softlockup watchdog to trigger.
2277 * This function gets called every 256 entries added to be patched.
2278 * Call cond_resched() here to make sure that other tasks can get scheduled
2279 * while processing all the functions being patched.
2284 * Corresponding read barrier in int3 notifier for making sure the
2285 * nr_entries and handler are correctly ordered wrt. patching.
2290 * First step: add a int3 trap to the address that will be patched.
2292 for (i = 0; i < nr_entries; i++) {
2293 tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
2294 text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
2300 * Second step: update all but the first byte of the patched range.
2302 for (do_sync = 0, i = 0; i < nr_entries; i++) {
2303 u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
2304 u8 _new[POKE_MAX_OPCODE_SIZE+1];
2305 const u8 *new = tp[i].text;
2306 int len = tp[i].len;
2308 if (len - INT3_INSN_SIZE > 0) {
2309 memcpy(old + INT3_INSN_SIZE,
2310 text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
2311 len - INT3_INSN_SIZE);
2315 memcpy(_new + 1, new, 5);
2319 text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
2320 new + INT3_INSN_SIZE,
2321 len - INT3_INSN_SIZE);
2327 * Emit a perf event to record the text poke, primarily to
2328 * support Intel PT decoding which must walk the executable code
2329 * to reconstruct the trace. The flow up to here is:
2332 * - write instruction tail
2333 * At this point the actual control flow will be through the
2334 * INT3 and handler and not hit the old or new instruction.
2335 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
2336 * can still be decoded. Subsequently:
2337 * - emit RECORD_TEXT_POKE with the new instruction
2339 * - write first byte
2341 * So before the text poke event timestamp, the decoder will see
2342 * either the old instruction flow or FUP/TIP of INT3. After the
2343 * text poke event timestamp, the decoder will see either the
2344 * new instruction flow or FUP/TIP of INT3. Thus decoders can
2345 * use the timestamp as the point at which to modify the
2347 * The old instruction is recorded so that the event can be
2348 * processed forwards or backwards.
2350 perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len);
2355 * According to Intel, this core syncing is very likely
2356 * not necessary and we'd be safe even without it. But
2357 * better safe than sorry (plus there's not only Intel).
2363 * Third step: replace the first byte (int3) by the first byte of
2366 for (do_sync = 0, i = 0; i < nr_entries; i++) {
2367 u8 byte = tp[i].text[0];
2372 if (byte == INT3_INSN_OPCODE)
2375 text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE);
2383 * Remove and wait for refs to be zero.
2385 if (!atomic_dec_and_test(&bp_desc.refs))
2386 atomic_cond_read_acquire(&bp_desc.refs, !VAL);
2389 static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
2390 const void *opcode, size_t len, const void *emulate)
2397 memcpy((void *)tp->text, opcode+i, len-i);
2401 ret = insn_decode_kernel(&insn, emulate);
2404 tp->rel_addr = addr - (void *)_stext;
2406 tp->opcode = insn.opcode.bytes[0];
2408 if (is_jcc32(&insn)) {
2410 * Map Jcc.d32 onto Jcc.d8 and use len to distinguish.
2412 tp->opcode = insn.opcode.bytes[1] - 0x10;
2415 switch (tp->opcode) {
2416 case RET_INSN_OPCODE:
2417 case JMP32_INSN_OPCODE:
2418 case JMP8_INSN_OPCODE:
2420 * Control flow instructions without implied execution of the
2421 * next instruction can be padded with INT3.
2423 for (i = insn.length; i < len; i++)
2424 BUG_ON(tp->text[i] != INT3_INSN_OPCODE);
2428 BUG_ON(len != insn.length);
2431 switch (tp->opcode) {
2432 case INT3_INSN_OPCODE:
2433 case RET_INSN_OPCODE:
2436 case CALL_INSN_OPCODE:
2437 case JMP32_INSN_OPCODE:
2438 case JMP8_INSN_OPCODE:
2439 case 0x70 ... 0x7f: /* Jcc */
2440 tp->disp = insn.immediate.value;
2443 default: /* assume NOP */
2445 case 2: /* NOP2 -- emulate as JMP8+0 */
2446 BUG_ON(memcmp(emulate, x86_nops[len], len));
2447 tp->opcode = JMP8_INSN_OPCODE;
2451 case 5: /* NOP5 -- emulate as JMP32+0 */
2452 BUG_ON(memcmp(emulate, x86_nops[len], len));
2453 tp->opcode = JMP32_INSN_OPCODE;
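/*
 * A 2-byte or 5-byte NOP and a JMP8/JMP32 with zero displacement both fall
 * through to the next instruction, so while the INT3 is live the NOP can
 * safely be emulated as a jump to the following instruction.
 */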
2457 default: /* unknown instruction */
2465 * We hard rely on the tp_vec being ordered; ensure this is so by flushing
 * early if needed.
2468 static bool tp_order_fail(void *addr)
2470 struct text_poke_loc *tp;
2475 if (!addr) /* force */
2478 tp = &tp_vec[tp_vec_nr - 1];
2479 if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
2485 static void text_poke_flush(void *addr)
2487 if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
2488 text_poke_bp_batch(tp_vec, tp_vec_nr);
2493 void text_poke_finish(void)
2495 text_poke_flush(NULL);
2498 void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
2500 struct text_poke_loc *tp;
2502 text_poke_flush(addr);
2504 tp = &tp_vec[tp_vec_nr++];
2505 text_poke_loc_init(tp, addr, opcode, len, emulate);
2509 * text_poke_bp() -- update instructions on live kernel on SMP
2510 * @addr: address to patch
2511 * @opcode: opcode of new instruction
2512 * @len: length to copy
2513 * @emulate: instruction to be emulated
2515 * Update a single instruction with the vector in the stack, avoiding
2516 * dynamically allocated memory. This function should be used when it is
2517 * not possible to allocate memory.
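 *
 * Illustrative use (hypothetical call site and target names):
 *
 *	u8 insn[CALL_INSN_SIZE];
 *
 *	__text_gen_insn(insn, CALL_INSN_OPCODE, site, new_target, CALL_INSN_SIZE);
 *	text_poke_bp(site, insn, CALL_INSN_SIZE, NULL);
 */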
2519 void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
2521 struct text_poke_loc tp;
2523 text_poke_loc_init(&tp, addr, opcode, len, emulate);
2524 text_poke_bp_batch(&tp, 1);