#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)
static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)
/*
 * Each GENERIC_NOPX is X bytes long and is defined as an array of bytes
 * that encode that NOP. To get from one NOP to the next, we add to the
 * array the offset equal to the sum of the sizes of all the NOPs that
 * precede the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous NOPs.
 */
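/*
 * Worked example (illustrative, using the tables below): intel_nops[3] is
 * intelnops + 1 + 2, i.e. it skips the 1-byte and 2-byte NOPs and points at
 * the first byte of GENERIC_NOP3, so ideal_nops[3] yields exactly the three
 * bytes of one 3-byte NOP. The extra slot at index ASM_NOP_MAX+1 holds the
 * 5-byte atomic NOP, intended for sites that get patched at runtime.
 */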
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1, GENERIC_NOP2, GENERIC_NOP3, GENERIC_NOP4,
	GENERIC_NOP5, GENERIC_NOP6, GENERIC_NOP7, GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9,
};
#endif
#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1, K8_NOP2, K8_NOP3, K8_NOP4,
	K8_NOP5, K8_NOP6, K8_NOP7, K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9,
};
#endif
#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1, K7_NOP2, K7_NOP3, K7_NOP4,
	K7_NOP5, K7_NOP6, K7_NOP7, K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9,
};
#endif
#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1, P6_NOP2, P6_NOP3, P6_NOP4,
	P6_NOP5, P6_NOP6, P6_NOP7, P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9,
};
#endif
/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif
void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		/* fall through */

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
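/*
 * Illustrative behaviour of add_nops() (hypothetical sizes): padding a
 * 12-byte area with ASM_NOP_MAX == 8 first copies ideal_nops[8] (one 8-byte
 * NOP) and then ideal_nops[4] (one 4-byte NOP), so the hole is filled with
 * a few long NOP instructions rather than twelve single-byte 0x90s.
 */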
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);
/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}
static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip  = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;

	repl_len = 5;

done:
	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
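/*
 * Worked example for recompute_jump() (hypothetical values): if the
 * replacement "e9 rel32" JMP ends up targeting an address 0x20 bytes past
 * the original instruction, then n_dspl = tgt_rip - orig_insn = 0x20. Since
 * 0x20 - 2 fits in a signed byte, the buffer is rewritten as the 2-byte
 * short form "eb 1e" followed by a 3-byte NOP instead of keeping the
 * 5-byte near JMP.
 */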
static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;
	int i;

	for (i = 0; i < a->padlen; i++) {
		if (instr[i] != 0x90)
			return;
	}

	local_irq_save(flags);
	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
	sync_core();
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}
/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 */
void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %p -> %p", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/* 0xe8 is a relative CALL; fix the offset. */
		if (*insnbuf == 0xe8 && a->replacementlen == 5) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}
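/*
 * For reference, the entries walked above are typically emitted by the
 * alternative()/ALTERNATIVE() macros from <asm/alternative.h>. A hypothetical
 * user looks like:
 *
 *	alternative("old_insns", "new_insns", X86_FEATURE_FOO);
 *
 * where X86_FEATURE_FOO and the instruction strings are placeholders; the
 * macro records instr_offset/repl_offset/instrlen/replacementlen in the
 * .altinstructions section that apply_alternatives() consumes at boot.
 */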
#ifdef CONFIG_SMP

static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}
static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}
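/*
 * Background for the two helpers above: SMP-safe locked instructions are
 * emitted via LOCK_PREFIX, which records the address of the 0xf0 lock-prefix
 * byte in the .smp_locks section. On a uniprocessor boot the prefix is
 * rewritten to 0x3e (a DS segment override, effectively a no-op here) to
 * avoid the cost of the bus lock, and rewritten back to 0xf0 if additional
 * CPUs are brought up later.
 */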
struct smp_alt_module {
	/* owning module (NULL for the core kernel) and its name */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static bool uniproc_patched = false;	/* protected by smp_alt */
void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text, void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&smp_alt);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&smp_alt);
}
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&smp_alt);
}
void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&smp_alt);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&smp_alt);
}
/* Return 1 if the address range is reserved for SMP-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local
	   interruptions that might execute the code about to be patched.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}
/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;

	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}
/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}
static void do_sync_core(void *info)
{
	sync_core();
}

static bool bp_patching_in_progress;
static void *bp_int3_handler, *bp_int3_addr;

int poke_int3_handler(struct pt_regs *regs)
{
	/* bp_patching_in_progress: pairs with the smp_wmb() in text_poke_bp() */
	smp_rmb();

	if (likely(!bp_patching_in_progress))
		return 0;

	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
		return 0;

	/* set up the specified breakpoint handler */
	regs->ip = (unsigned long) bp_int3_handler;

	return 1;
}
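/*
 * poke_int3_handler() is invoked from the int3 exception path. Returning 1
 * tells the caller the trap was consumed: any CPU that stumbles into the
 * temporary 0xcc byte while text_poke_bp() is mid-update is redirected to
 * @handler instead of executing a half-written instruction.
 */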
/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @handler:	address to jump to when the temporary breakpoint is hit
 *
 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using the int3 breakpoint.
 *
 * The way it is done:
 *	- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- update all but the first byte of the patched range
 *	- sync cores
 *	- replace the first byte (int3) by the first byte of the
 *	  replacing opcode
 *	- sync cores
 *
 * Note: must be called under text_mutex.
 */
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	unsigned char int3 = 0xcc;

	bp_int3_handler = handler;
	bp_int3_addr = (u8 *)addr + sizeof(int3);
	bp_patching_in_progress = true;
	/*
	 * Corresponding read barrier in the int3 notifier makes
	 * sure the in_progress flag is correctly ordered wrt.
	 * patching.
	 */
	smp_wmb();

	text_poke(addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	if (len - sizeof(int3) > 0) {
		/* patch all but the first byte */
		text_poke((char *)addr + sizeof(int3),
			  (const char *) opcode + sizeof(int3),
			  len - sizeof(int3));
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/* patch the first byte */
	text_poke(addr, opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	bp_patching_in_progress = false;

	return addr;
}
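/*
 * Illustrative use of text_poke_bp() (hypothetical symbols): turning a
 * 5-byte NOP at patch_addr into a near JMP to target_addr while other CPUs
 * may be executing it:
 *
 *	unsigned char jmp[5] = { 0xe9, 0, 0, 0, 0 };
 *
 *	*(s32 *)&jmp[1] = (s32)(target_addr - (patch_addr + 5));
 *	mutex_lock(&text_mutex);
 *	text_poke_bp((void *)patch_addr, jmp, sizeof(jmp),
 *		     (void *)(patch_addr + sizeof(jmp)));
 *	mutex_unlock(&text_mutex);
 *
 * Using the instruction following the patch site as @handler makes a CPU
 * that hits the temporary int3 behave as if the old NOP were still there.
 */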