/*
 * tools/testing/selftests/kvm/lib/x86.c
 *
 * Copyright (C) 2018, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#define _GNU_SOURCE /* for program_invocation_name */

#include "test_util.h"
#include "kvm_util.h"
#include "kvm_util_internal.h"
#include "x86.h"
/* Minimum physical address used for virtual translation tables. */
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000

/* Virtual translation table structure declarations */
struct pageMapL4Entry {
	uint64_t present:1;
	uint64_t writable:1;
	uint64_t user:1;
	uint64_t write_through:1;
	uint64_t cache_disable:1;
	uint64_t accessed:1;
	uint64_t ignored_06:1;
	uint64_t page_size:1;
	uint64_t ignored_11_08:4;
	uint64_t address:40;
	uint64_t ignored_62_52:11;
	uint64_t execute_disable:1;
};

struct pageDirectoryPointerEntry {
	uint64_t present:1;
	uint64_t writable:1;
	uint64_t user:1;
	uint64_t write_through:1;
	uint64_t cache_disable:1;
	uint64_t accessed:1;
	uint64_t ignored_06:1;
	uint64_t page_size:1;
	uint64_t ignored_11_08:4;
	uint64_t address:40;
	uint64_t ignored_62_52:11;
	uint64_t execute_disable:1;
};

struct pageDirectoryEntry {
	uint64_t present:1;
	uint64_t writable:1;
	uint64_t user:1;
	uint64_t write_through:1;
	uint64_t cache_disable:1;
	uint64_t accessed:1;
	uint64_t ignored_06:1;
	uint64_t page_size:1;
	uint64_t ignored_11_08:4;
	uint64_t address:40;
	uint64_t ignored_62_52:11;
	uint64_t execute_disable:1;
};

struct pageTableEntry {
	uint64_t present:1;
	uint64_t writable:1;
	uint64_t user:1;
	uint64_t write_through:1;
	uint64_t cache_disable:1;
	uint64_t accessed:1;
	uint64_t dirty:1;
	uint64_t reserved_07:1;
	uint64_t global:1;
	uint64_t ignored_11_09:3;
	uint64_t address:40;
	uint64_t ignored_62_52:11;
	uint64_t execute_disable:1;
};
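
/*
 * The bitfields above follow the x86-64 4-level paging entry layout:
 * bits 11:0 hold the flag bits, bits 51:12 hold the physical page
 * frame number, and bit 63 is the execute-disable (NX) bit.
 */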

/* Register Dump
 *
 * Input Args:
 *   indent - Left margin indent amount
 *   regs - Register state
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the state of the registers given by regs, to the FILE stream
 * given by stream.
 */
void regs_dump(FILE *stream, struct kvm_regs *regs,
	       uint8_t indent)
{
	fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
		"rcx: 0x%.16llx rdx: 0x%.16llx\n",
		indent, "",
		regs->rax, regs->rbx, regs->rcx, regs->rdx);
	fprintf(stream, "%*srsi: 0x%.16llx rdi: 0x%.16llx "
		"rsp: 0x%.16llx rbp: 0x%.16llx\n",
		indent, "",
		regs->rsi, regs->rdi, regs->rsp, regs->rbp);
	fprintf(stream, "%*sr8:  0x%.16llx r9:  0x%.16llx "
		"r10: 0x%.16llx r11: 0x%.16llx\n",
		indent, "",
		regs->r8, regs->r9, regs->r10, regs->r11);
	fprintf(stream, "%*sr12: 0x%.16llx r13: 0x%.16llx "
		"r14: 0x%.16llx r15: 0x%.16llx\n",
		indent, "",
		regs->r12, regs->r13, regs->r14, regs->r15);
	fprintf(stream, "%*srip: 0x%.16llx rfl: 0x%.16llx\n",
		indent, "",
		regs->rip, regs->rflags);
}

/* Segment Dump
 *
 * Input Args:
 *   indent - Left margin indent amount
 *   segment - KVM segment
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the state of the KVM segment given by segment, to the FILE stream
 * given by stream.
 */
static void segment_dump(FILE *stream, struct kvm_segment *segment,
			 uint8_t indent)
{
	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
		"selector: 0x%.4x type: 0x%.2x\n",
		indent, "", segment->base, segment->limit,
		segment->selector, segment->type);
	fprintf(stream, "%*spresent: 0x%.2x dpl: 0x%.2x "
		"db: 0x%.2x s: 0x%.2x l: 0x%.2x\n",
		indent, "", segment->present, segment->dpl,
		segment->db, segment->s, segment->l);
	fprintf(stream, "%*sg: 0x%.2x avl: 0x%.2x "
		"unusable: 0x%.2x padding: 0x%.2x\n",
		indent, "", segment->g, segment->avl,
		segment->unusable, segment->padding);
}

/* dtable Dump
 *
 * Input Args:
 *   indent - Left margin indent amount
 *   dtable - KVM dtable
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the state of the KVM dtable given by dtable, to the FILE stream
 * given by stream.
 */
static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
			uint8_t indent)
{
	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
		"padding: 0x%.4x 0x%.4x 0x%.4x\n",
		indent, "", dtable->base, dtable->limit,
		dtable->padding[0], dtable->padding[1], dtable->padding[2]);
}

/* System Register Dump
 *
 * Input Args:
 *   indent - Left margin indent amount
 *   sregs - System registers
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the state of the system registers given by sregs, to the FILE stream
 * given by stream.
 */
void sregs_dump(FILE *stream, struct kvm_sregs *sregs,
		uint8_t indent)
{
	unsigned int i;

192 fprintf(stream, "%*scs:\n", indent, "");
193 segment_dump(stream, &sregs->cs, indent + 2);
194 fprintf(stream, "%*sds:\n", indent, "");
195 segment_dump(stream, &sregs->ds, indent + 2);
196 fprintf(stream, "%*ses:\n", indent, "");
197 segment_dump(stream, &sregs->es, indent + 2);
198 fprintf(stream, "%*sfs:\n", indent, "");
199 segment_dump(stream, &sregs->fs, indent + 2);
200 fprintf(stream, "%*sgs:\n", indent, "");
201 segment_dump(stream, &sregs->gs, indent + 2);
202 fprintf(stream, "%*sss:\n", indent, "");
203 segment_dump(stream, &sregs->ss, indent + 2);
204 fprintf(stream, "%*str:\n", indent, "");
205 segment_dump(stream, &sregs->tr, indent + 2);
206 fprintf(stream, "%*sldt:\n", indent, "");
207 segment_dump(stream, &sregs->ldt, indent + 2);
209 fprintf(stream, "%*sgdt:\n", indent, "");
210 dtable_dump(stream, &sregs->gdt, indent + 2);
211 fprintf(stream, "%*sidt:\n", indent, "");
212 dtable_dump(stream, &sregs->idt, indent + 2);
214 fprintf(stream, "%*scr0: 0x%.16llx cr2: 0x%.16llx "
215 "cr3: 0x%.16llx cr4: 0x%.16llx\n",
217 sregs->cr0, sregs->cr2, sregs->cr3, sregs->cr4);
218 fprintf(stream, "%*scr8: 0x%.16llx efer: 0x%.16llx "
219 "apic_base: 0x%.16llx\n",
221 sregs->cr8, sregs->efer, sregs->apic_base);
223 fprintf(stream, "%*sinterrupt_bitmap:\n", indent, "");
224 for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) {
225 fprintf(stream, "%*s%.16llx\n", indent + 2, "",
226 sregs->interrupt_bitmap[i]);

void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
{
	TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);

	/* If needed, create page map l4 table. */
	if (!vm->pgd_created) {
		vm_paddr_t paddr = vm_phy_page_alloc(vm,
			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
		vm->pgd = paddr;
		vm->pgd_created = true;
	}
}

/* VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by vm, creates a virtual translation for the page
 * starting at vaddr to the page starting at paddr.
 */
void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	uint32_t pgd_memslot)
{
	uint16_t index[4];
	struct pageMapL4Entry *pml4e;

	TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);

	TEST_ASSERT((vaddr % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x",
		vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(vaddr >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx",
		vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x",
		paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		paddr, vm->max_gfn, vm->page_size);
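
	/*
	 * Split the canonical virtual address into the four 9-bit table
	 * indices used by 4-level paging: page table (bits 20:12), page
	 * directory (29:21), page directory pointer (38:30) and PML4
	 * (47:39).
	 */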
	index[0] = (vaddr >> 12) & 0x1ffu;
	index[1] = (vaddr >> 21) & 0x1ffu;
	index[2] = (vaddr >> 30) & 0x1ffu;
	index[3] = (vaddr >> 39) & 0x1ffu;

	/* Allocate page directory pointer table if not present. */
	pml4e = addr_gpa2hva(vm, vm->pgd);
	if (!pml4e[index[3]].present) {
		pml4e[index[3]].address = vm_phy_page_alloc(vm,
			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
			>> vm->page_shift;
		pml4e[index[3]].writable = true;
		pml4e[index[3]].present = true;
	}

	/* Allocate page directory table if not present. */
	struct pageDirectoryPointerEntry *pdpe;
	pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
	if (!pdpe[index[2]].present) {
		pdpe[index[2]].address = vm_phy_page_alloc(vm,
			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
			>> vm->page_shift;
		pdpe[index[2]].writable = true;
		pdpe[index[2]].present = true;
	}

	/* Allocate page table if not present. */
	struct pageDirectoryEntry *pde;
	pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
	if (!pde[index[1]].present) {
		pde[index[1]].address = vm_phy_page_alloc(vm,
			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
			>> vm->page_shift;
		pde[index[1]].writable = true;
		pde[index[1]].present = true;
	}

	/* Fill in page table entry. */
	struct pageTableEntry *pte;
	pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
	pte[index[0]].address = paddr >> vm->page_shift;
	pte[index[0]].writable = true;
	pte[index[0]].present = true;
}

/* Virtual Translation Tables Dump
 *
 * Input Args:
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps to the FILE stream given by stream, the contents of all the
 * virtual translation tables for the VM given by vm.
 */
void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	struct pageMapL4Entry *pml4e, *pml4e_start;
	struct pageDirectoryPointerEntry *pdpe, *pdpe_start;
	struct pageDirectoryEntry *pde, *pde_start;
	struct pageTableEntry *pte, *pte_start;

	if (!vm->pgd_created)
		return;

	fprintf(stream, "%*s                                          "
		"                no\n", indent, "");
	fprintf(stream, "%*s      index hvaddr         gpaddr         "
		"addr         w exec dirty\n",
		indent, "");
	pml4e_start = (struct pageMapL4Entry *) addr_gpa2hva(vm,
		vm->pgd);
	for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
		pml4e = &pml4e_start[n1];
		if (!pml4e->present)
			continue;
		fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10lx %u "
			" %u\n",
			indent, "",
			pml4e - pml4e_start, pml4e,
			addr_hva2gpa(vm, pml4e), (uint64_t) pml4e->address,
			pml4e->writable, pml4e->execute_disable);

		pdpe_start = addr_gpa2hva(vm, pml4e->address
			* vm->page_size);
		for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
			pdpe = &pdpe_start[n2];
			if (!pdpe->present)
				continue;
			fprintf(stream, "%*spdpe  0x%-3zx %p 0x%-12lx 0x%-10lx "
				"%u  %u\n",
				indent, "",
				pdpe - pdpe_start, pdpe,
				addr_hva2gpa(vm, pdpe),
				(uint64_t) pdpe->address, pdpe->writable,
				pdpe->execute_disable);

			pde_start = addr_gpa2hva(vm,
				pdpe->address * vm->page_size);
			for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
				pde = &pde_start[n3];
				if (!pde->present)
					continue;
				fprintf(stream, "%*spde   0x%-3zx %p "
					"0x%-12lx 0x%-10lx %u  %u\n",
					indent, "", pde - pde_start, pde,
					addr_hva2gpa(vm, pde),
					(uint64_t) pde->address, pde->writable,
					pde->execute_disable);

				pte_start = addr_gpa2hva(vm,
					pde->address * vm->page_size);
				for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
					pte = &pte_start[n4];
					if (!pte->present)
						continue;
					fprintf(stream, "%*spte   0x%-3zx %p "
						"0x%-12lx 0x%-10lx %u  %u "
						"    %u    0x%-10lx\n",
						indent, "",
						pte - pte_start, pte,
						addr_hva2gpa(vm, pte),
						(uint64_t) pte->address,
						pte->writable,
						pte->execute_disable,
						pte->dirty,
						((uint64_t) n1 << 27)
						| ((uint64_t) n2 << 18)
						| ((uint64_t) n3 << 9)
						| ((uint64_t) n4));
				}
			}
		}
	}
}

/* Set Unusable Segment
 *
 * Input Args: None
 *
 * Output Args:
 *   segp - Pointer to segment register
 *
 * Return: None
 *
 * Sets the segment register pointed to by segp to an unusable state.
 */
static void kvm_seg_set_unusable(struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->unusable = true;
}

static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
{
	void *gdt = addr_gva2hva(vm, vm->gdt);
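	/*
	 * Each GDT descriptor is 8 bytes; the selector's low 3 bits hold
	 * the RPL and table-indicator fields, so selector >> 3 gives the
	 * descriptor index within the GDT.
	 */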
	struct desc64 *desc = gdt + (segp->selector >> 3) * 8;

	desc->limit0 = segp->limit & 0xFFFF;
	desc->base0 = segp->base & 0xFFFF;
	desc->base1 = segp->base >> 16;
	desc->type = segp->type;
	desc->s = segp->s;
	desc->dpl = segp->dpl;
	desc->p = segp->present;
	desc->limit1 = segp->limit >> 16;
	desc->avl = segp->avl;
	desc->l = segp->l;
	desc->db = segp->db;
	desc->g = segp->g;
	desc->base2 = segp->base >> 24;
	if (!segp->s)
		desc->base3 = segp->base >> 32;
}

/* Set Long Mode Flat Kernel Code Segment
 *
 * Input Args:
 *   vm - VM whose GDT is being filled, or NULL to only write segp
 *   selector - selector value
 *
 * Output Args:
 *   segp - Pointer to KVM segment
 *
 * Return: None
 *
 * Sets up the KVM segment pointed to by segp, to be a code segment
 * with the selector value given by selector.
 */
static void kvm_seg_set_kernel_code_64bit(struct kvm_vm *vm, uint16_t selector,
	struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->selector = selector;
	segp->limit = 0xFFFFFFFFu;
	segp->s = 0x1; /* kTypeCodeData */
	segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed
					  * | kFlagCodeReadable
					  */
	segp->g = true;
	segp->l = true;
	segp->present = 1;
	if (vm)
		kvm_seg_fill_gdt_64bit(vm, segp);
}

/* Set Long Mode Flat Kernel Data Segment
 *
 * Input Args:
 *   vm - VM whose GDT is being filled, or NULL to only write segp
 *   selector - selector value
 *
 * Output Args:
 *   segp - Pointer to KVM segment
 *
 * Return: None
 *
 * Sets up the KVM segment pointed to by segp, to be a data segment
 * with the selector value given by selector.
 */
static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
	struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->selector = selector;
	segp->limit = 0xFFFFFFFFu;
	segp->s = 0x1; /* kTypeCodeData */
	segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed
					  * | kFlagDataWritable
					  */
	segp->g = true;
	segp->present = true;
	if (vm)
		kvm_seg_fill_gdt_64bit(vm, segp);
}

/* Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Translates the VM virtual address given by gva to a VM physical
 * address by walking the virtual translation tables of the VM given
 * by vm.  A TEST_ASSERT failure occurs if no mapping for the VM
 * virtual address exists.
 */
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint16_t index[4];
	struct pageMapL4Entry *pml4e;
	struct pageDirectoryPointerEntry *pdpe;
	struct pageDirectoryEntry *pde;
	struct pageTableEntry *pte;

	TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);

	index[0] = (gva >> 12) & 0x1ffu;
	index[1] = (gva >> 21) & 0x1ffu;
	index[2] = (gva >> 30) & 0x1ffu;
	index[3] = (gva >> 39) & 0x1ffu;
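
	/*
	 * Walk the guest translation tables one level at a time; a
	 * non-present entry at any level means the address is unmapped.
	 */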
	if (!vm->pgd_created)
		goto unmapped_gva;
	pml4e = addr_gpa2hva(vm, vm->pgd);
	if (!pml4e[index[3]].present)
		goto unmapped_gva;

	pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
	if (!pdpe[index[2]].present)
		goto unmapped_gva;

	pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
	if (!pde[index[1]].present)
		goto unmapped_gva;

	pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
	if (!pte[index[0]].present)
		goto unmapped_gva;

	return (pte[index[0]].address * vm->page_size) + (gva & 0xfffu);

unmapped_gva:
	TEST_ASSERT(false, "No mapping for vm virtual address, "
		"gva: 0x%lx", gva);
	return -1; /* not reached */
}

static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt, int gdt_memslot,
			  int pgd_memslot)
{
	if (!vm->gdt)
		vm->gdt = vm_vaddr_alloc(vm, getpagesize(),
			KVM_UTIL_MIN_VADDR, gdt_memslot, pgd_memslot);

	dt->base = vm->gdt;
	dt->limit = getpagesize();
}

static void kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp,
				int selector, int gdt_memslot,
				int pgd_memslot)
{
	if (!vm->tss)
		vm->tss = vm_vaddr_alloc(vm, getpagesize(),
			KVM_UTIL_MIN_VADDR, gdt_memslot, pgd_memslot);

	memset(segp, 0, sizeof(*segp));
	segp->base = vm->tss;
	segp->limit = 0x67;
	segp->selector = selector;
	segp->type = 0xb;
	segp->present = 1;
	kvm_seg_fill_gdt_64bit(vm, segp);
}

void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot)
{
	struct kvm_sregs sregs;

	/* Set mode specific system register values. */
	vcpu_sregs_get(vm, vcpuid, &sregs);

	sregs.idt.limit = 0;

	kvm_setup_gdt(vm, &sregs.gdt, gdt_memslot, pgd_memslot);

	switch (vm->mode) {
	case VM_MODE_FLAT48PG:
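		/*
		 * Enter 64-bit long mode directly: protected mode and paging
		 * in CR0, PAE in CR4, and LME/LMA in EFER.  EFER.NX allows
		 * the execute_disable bit in the translation tables above.
		 */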
		sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
		sregs.cr4 |= X86_CR4_PAE;
		sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);

		kvm_seg_set_unusable(&sregs.ldt);
		kvm_seg_set_kernel_code_64bit(vm, 0x8, &sregs.cs);
		kvm_seg_set_kernel_data_64bit(vm, 0x10, &sregs.ds);
		kvm_seg_set_kernel_data_64bit(vm, 0x10, &sregs.es);
		kvm_setup_tss_64bit(vm, &sregs.tr, 0x18, gdt_memslot, pgd_memslot);
		break;

	default:
		TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", vm->mode);
	}

	sregs.cr3 = vm->pgd;
	vcpu_sregs_set(vm, vcpuid, &sregs);
}

/* Adds a vCPU with reasonable defaults (i.e., a stack)
 *
 * Input Args:
 *   vcpuid - The id of the VCPU to add to the VM.
 *   guest_code - The vCPU's entry point
 */
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
	struct kvm_mp_state mp_state;
	struct kvm_regs regs;
	vm_vaddr_t stack_vaddr;
	stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
				     DEFAULT_GUEST_STACK_VADDR_MIN, 0, 0);

	/* Create VCPU */
	vm_vcpu_add(vm, vcpuid, 0, 0);

	/* Setup guest general purpose registers */
	vcpu_regs_get(vm, vcpuid, &regs);
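	/* Bit 1 of RFLAGS is reserved and must be set; RSP starts at the
	 * top of the stack allocation since the stack grows down. */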
	regs.rflags = regs.rflags | 0x2;
	regs.rsp = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize());
	regs.rip = (unsigned long) guest_code;
	vcpu_regs_set(vm, vcpuid, &regs);

	/* Setup the MP state */
	mp_state.mp_state = 0;
	vcpu_set_mp_state(vm, vcpuid, &mp_state);
}

/* VCPU Set CPUID
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU id
 *   cpuid - The CPUID values to set.
 *
 * Output Args: None
 *
 * Return: void
 *
 * Set the VCPU's CPUID.
 */
void vcpu_set_cpuid(struct kvm_vm *vm,
		uint32_t vcpuid, struct kvm_cpuid2 *cpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int rc;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	rc = ioctl(vcpu->fd, KVM_SET_CPUID2, cpuid);
	TEST_ASSERT(rc == 0, "KVM_SET_CPUID2 failed, rc: %i errno: %i",
		rc, errno);
}

/* Create a VM with reasonable defaults
 *
 * Input Args:
 *   vcpuid - The id of the single VCPU to add to the VM.
 *   extra_mem_pages - The size of extra memories to add (this will
 *                     decide how much extra space we will need to
 *                     setup the page tables using mem slot 0)
 *   guest_code - The vCPU's entry point
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 */
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
				 void *guest_code)
{
	struct kvm_vm *vm;
	/*
	 * For x86 the maximum page table size for a memory region
	 * will be when only 4K pages are used.  In that case the
	 * total extra size for page tables (for extra N pages) will
	 * be: N/512+N/512^2+N/512^3+... which is definitely smaller
	 * than N/512*2.
	 */
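	/* For example, extra_mem_pages = 2048 reserves 2048/512*2 = 8
	 * extra pages for page tables. */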
	uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;

	/* Create VM */
	vm = vm_create(VM_MODE_FLAT48PG,
		       DEFAULT_GUEST_PHY_PAGES + extra_pg_pages,
		       O_RDWR);

	/* Setup guest code */
	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);

	/* Setup IRQ Chip */
	vm_create_irqchip(vm);

	/* Add the first vCPU. */
	vm_vcpu_add_default(vm, vcpuid, guest_code);

	return vm;
}

struct kvm_x86_state {
	struct kvm_vcpu_events events;
	struct kvm_mp_state mp_state;
	struct kvm_regs regs;
	struct kvm_xsave xsave;
	struct kvm_xcrs xcrs;
	struct kvm_sregs sregs;
	struct kvm_debugregs debugregs;
	union {
		struct kvm_nested_state nested;
		char nested_[16384];
	};
	struct kvm_msrs msrs;
};

static int kvm_get_num_msrs(struct kvm_vm *vm)
{
	struct kvm_msr_list nmsrs;
	int r;
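
	/*
	 * Probe with nmsrs = 0: KVM fails this call with E2BIG and writes
	 * back the number of MSR indices it wants to report.
	 */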
	nmsrs.nmsrs = 0;
	r = ioctl(vm->kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
	TEST_ASSERT(r == -1 && errno == E2BIG, "Unexpected result from KVM_GET_MSR_INDEX_LIST probe, r: %i",
		r);

	return nmsrs.nmsrs;
}

struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	struct kvm_msr_list *list;
	struct kvm_x86_state *state;
	int nmsrs, r, i;
	static int nested_size = -1;

	if (nested_size == -1) {
		nested_size = kvm_check_cap(KVM_CAP_NESTED_STATE);
		TEST_ASSERT(nested_size <= sizeof(state->nested_),
			"Nested state size too big, %i > %zi",
			nested_size, sizeof(state->nested_));
	}

	nmsrs = kvm_get_num_msrs(vm);
	list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
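	/* Second call, with a large enough buffer, retrieves the MSR indices. */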
	list->nmsrs = nmsrs;
	r = ioctl(vm->kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MSR_INDEX_LIST, r: %i",
		r);

	state = malloc(sizeof(*state) + nmsrs * sizeof(state->msrs.entries[0]));
	r = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, &state->events);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_VCPU_EVENTS, r: %i",
		r);

	r = ioctl(vcpu->fd, KVM_GET_MP_STATE, &state->mp_state);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MP_STATE, r: %i",
		r);

	r = ioctl(vcpu->fd, KVM_GET_REGS, &state->regs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_REGS, r: %i",
		r);

	r = ioctl(vcpu->fd, KVM_GET_XSAVE, &state->xsave);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
		r);

	if (kvm_check_cap(KVM_CAP_XCRS)) {
		r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
		TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
			r);
	}

	r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
		r);

	if (nested_size) {
		state->nested.size = sizeof(state->nested_);
		r = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, &state->nested);
		TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_NESTED_STATE, r: %i",
			r);
		TEST_ASSERT(state->nested.size <= nested_size,
			"Nested state size too big, %i (KVM_CHECK_CAP gave %i)",
			state->nested.size, nested_size);
	} else
		state->nested.size = 0;

	state->msrs.nmsrs = nmsrs;
	for (i = 0; i < nmsrs; i++)
		state->msrs.entries[i].index = list->indices[i];
	r = ioctl(vcpu->fd, KVM_GET_MSRS, &state->msrs);
	TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed at %x)",
		r, r == nmsrs ? -1 : list->indices[r]);

	r = ioctl(vcpu->fd, KVM_GET_DEBUGREGS, &state->debugregs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_DEBUGREGS, r: %i",
		r);

	free(list);
	return state;
}

void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *state)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int r;

	if (state->nested.size) {
		r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
		TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
			r);
	}

	r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
		r);

	if (kvm_check_cap(KVM_CAP_XCRS)) {
		r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
		TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
			r);
	}

	r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
		r);

	r = ioctl(vcpu->fd, KVM_SET_MSRS, &state->msrs);
	TEST_ASSERT(r == state->msrs.nmsrs, "Unexpected result from KVM_SET_MSRS, r: %i (failed at %x)",
		r, r == state->msrs.nmsrs ? -1 : state->msrs.entries[r].index);

	r = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, &state->events);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_VCPU_EVENTS, r: %i",
		r);

	r = ioctl(vcpu->fd, KVM_SET_MP_STATE, &state->mp_state);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_MP_STATE, r: %i",
		r);

	r = ioctl(vcpu->fd, KVM_SET_DEBUGREGS, &state->debugregs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_DEBUGREGS, r: %i",
		r);

	r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i",