/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pagemap.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>
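
/*
 * The "magic page" holds a struct kvm_vcpu_arch_shared that the host maps
 * into the guest at the top of the address space, so frequently used SPRs
 * and the MSR can be accessed with plain loads and stores instead of
 * trapping into the hypervisor.
 */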
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4
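
/*
 * Patched-in emulation code is generated into this scratch buffer;
 * whatever is left unused once patching is done gets handed back to the
 * page allocator by kvm_free_tmp().
 */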
static bool kvm_patching_worked = true;
char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;

static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}
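
/*
 * Helpers that rewrite *inst into a load from or a store to a field of
 * the magic page: ld/std on 64-bit kernels, lwz/stw on 32-bit ones
 * (where 64-bit fields are accessed through their low word).
 */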
static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}
97 static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
100 kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
102 kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}

static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels interrupt handlers and our code
	   can be in different regions, so we don't patch them */
	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

static u32 *kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}
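
/*
 * The kvm_emulate_* arrays are hand-written assembly templates (see
 * kvm_emul.S). Each template is copied into kvm_tmp and then specialized:
 * the *_offs symbols give the word offsets of the instructions that must
 * be fixed up with the right register, the original instruction and the
 * branch back to the call site.
 */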
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];
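
/*
 * The mtmsr template reads the source register at two sites, hence the
 * two reg offsets that both need the same fixup.
 */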
static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];
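
/*
 * wrtee only transfers MSR[EE]. For "wrteei 1" (imm_one) the register
 * read in the template is replaced by loading the MSR_EE constant.
 */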
static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];
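
/*
 * "wrteei 0" needs no register fixup at all; only the branch back to
 * the call site gets patched into the template.
 */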
static void kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];
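
/*
 * mtsrin encodes two source registers (RT and RB), so the template gets
 * two fixups: RB is shifted up into reg1, RT is OR'd into reg2.
 */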
static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif /* CONFIG_PPC_BOOK3S_32 */
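
/*
 * Runs on every CPU: ask the host, via the ePAPR hypercall ABI, to map
 * the magic page at the top of the address space and report back which
 * magic page features it supports.
 */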
static void kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8] = {0};
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;

	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}
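
/*
 * Check one kernel instruction: with the RT (and, for mtsrin, RB) field
 * masked off, compare it against the privileged encodings we know how to
 * paravirtualize and patch in the matching replacement.
 */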
static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}

extern u32 kvm_template_start[];
extern u32 kvm_template_end[];

static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}

static __init void kvm_free_tmp(void)
{
	/*
	 * Inform kmemleak about the hole in the .bss section since the
	 * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
	 */
	kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
			   ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
	free_reserved_area(&kvm_tmp[kvm_tmp_index],
			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}

static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		goto free_tmp;

	if (!epapr_paravirt_enabled)
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

free_tmp:
	kvm_free_tmp();

	return 0;
}
postcore_initcall(kvm_guest_init);