/*
 * tools/testing/selftests/kvm/lib/x86.c
 *
 * Copyright (C) 2018, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#define _GNU_SOURCE /* for program_invocation_name */

#include "test_util.h"
#include "kvm_util.h"
#include "kvm_util_internal.h"
#include "x86.h"

/* Minimum physical address used for virtual translation tables. */
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000

/* Virtual translation table structure declarations */
struct pageMapL4Entry {
        uint64_t present:1;
        uint64_t writable:1;
        uint64_t user:1;
        uint64_t write_through:1;
        uint64_t cache_disable:1;
        uint64_t accessed:1;
        uint64_t ignored_06:1;
        uint64_t page_size:1;
        uint64_t ignored_11_08:4;
        uint64_t address:40;
        uint64_t ignored_62_52:11;
        uint64_t execute_disable:1;
};

struct pageDirectoryPointerEntry {
        uint64_t present:1;
        uint64_t writable:1;
        uint64_t user:1;
        uint64_t write_through:1;
        uint64_t cache_disable:1;
        uint64_t accessed:1;
        uint64_t ignored_06:1;
        uint64_t page_size:1;
        uint64_t ignored_11_08:4;
        uint64_t address:40;
        uint64_t ignored_62_52:11;
        uint64_t execute_disable:1;
};

struct pageDirectoryEntry {
        uint64_t present:1;
        uint64_t writable:1;
        uint64_t user:1;
        uint64_t write_through:1;
        uint64_t cache_disable:1;
        uint64_t accessed:1;
        uint64_t ignored_06:1;
        uint64_t page_size:1;
        uint64_t ignored_11_08:4;
        uint64_t address:40;
        uint64_t ignored_62_52:11;
        uint64_t execute_disable:1;
};

struct pageTableEntry {
        uint64_t present:1;
        uint64_t writable:1;
        uint64_t user:1;
        uint64_t write_through:1;
        uint64_t cache_disable:1;
        uint64_t accessed:1;
        uint64_t dirty:1;
        uint64_t reserved_07:1;
        uint64_t global:1;
        uint64_t ignored_11_09:3;
        uint64_t address:40;
        uint64_t ignored_62_52:11;
        uint64_t execute_disable:1;
};

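/*
 * Example (illustrative sketch, not part of the original source): a
 * 48-bit canonical guest virtual address selects one entry from each
 * of the four table levels above via 9-bit index fields:
 *
 *      uint64_t vaddr = 0x0000400000001000;
 *      uint16_t pte_index   = (vaddr >> 12) & 0x1ffu;  /* 0x001 *&#47;
 *      uint16_t pde_index   = (vaddr >> 21) & 0x1ffu;  /* 0x000 *&#47;
 *      uint16_t pdpte_index = (vaddr >> 30) & 0x1ffu;  /* 0x000 *&#47;
 *      uint16_t pml4e_index = (vaddr >> 39) & 0x1ffu;  /* 0x080 *&#47;
 *
 * virt_pg_map() and addr_gva2gpa() below perform exactly this split.
 */
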
/* Register Dump
 *
 * Input Args:
 *   indent - Left margin indent amount
 *   regs - Register state to dump
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the state of the registers given by regs, to the FILE stream
 * given by stream.
 */
void regs_dump(FILE *stream, struct kvm_regs *regs,
               uint8_t indent)
{
        fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
                "rcx: 0x%.16llx rdx: 0x%.16llx\n",
                indent, "",
                regs->rax, regs->rbx, regs->rcx, regs->rdx);
        fprintf(stream, "%*srsi: 0x%.16llx rdi: 0x%.16llx "
                "rsp: 0x%.16llx rbp: 0x%.16llx\n",
                indent, "",
                regs->rsi, regs->rdi, regs->rsp, regs->rbp);
        fprintf(stream, "%*sr8:  0x%.16llx r9:  0x%.16llx "
                "r10: 0x%.16llx r11: 0x%.16llx\n",
                indent, "",
                regs->r8, regs->r9, regs->r10, regs->r11);
        fprintf(stream, "%*sr12: 0x%.16llx r13: 0x%.16llx "
                "r14: 0x%.16llx r15: 0x%.16llx\n",
                indent, "",
                regs->r12, regs->r13, regs->r14, regs->r15);
        fprintf(stream, "%*srip: 0x%.16llx rfl: 0x%.16llx\n",
                indent, "",
                regs->rip, regs->rflags);
}

/* Segment Dump
 *
 * Input Args:
 *   indent - Left margin indent amount
 *   segment - KVM segment
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the state of the KVM segment given by segment, to the FILE stream
 * given by stream.
 */
static void segment_dump(FILE *stream, struct kvm_segment *segment,
                         uint8_t indent)
{
        fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
                "selector: 0x%.4x type: 0x%.2x\n",
                indent, "", segment->base, segment->limit,
                segment->selector, segment->type);
        fprintf(stream, "%*spresent: 0x%.2x dpl: 0x%.2x "
                "db: 0x%.2x s: 0x%.2x l: 0x%.2x\n",
                indent, "", segment->present, segment->dpl,
                segment->db, segment->s, segment->l);
        fprintf(stream, "%*sg: 0x%.2x avl: 0x%.2x "
                "unusable: 0x%.2x padding: 0x%.2x\n",
                indent, "", segment->g, segment->avl,
                segment->unusable, segment->padding);
}

/* dtable Dump
 *
 * Input Args:
 *   indent - Left margin indent amount
 *   dtable - KVM dtable
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the state of the KVM dtable given by dtable, to the FILE stream
 * given by stream.
 */
static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
                        uint8_t indent)
{
        fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
                "padding: 0x%.4x 0x%.4x 0x%.4x\n",
                indent, "", dtable->base, dtable->limit,
                dtable->padding[0], dtable->padding[1], dtable->padding[2]);
}

/* System Register Dump
 *
 * Input Args:
 *   indent - Left margin indent amount
 *   sregs - System registers
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the state of the system registers given by sregs, to the FILE stream
 * given by stream.
 */
void sregs_dump(FILE *stream, struct kvm_sregs *sregs,
                uint8_t indent)
{
        unsigned int i;

        fprintf(stream, "%*scs:\n", indent, "");
        segment_dump(stream, &sregs->cs, indent + 2);
        fprintf(stream, "%*sds:\n", indent, "");
        segment_dump(stream, &sregs->ds, indent + 2);
        fprintf(stream, "%*ses:\n", indent, "");
        segment_dump(stream, &sregs->es, indent + 2);
        fprintf(stream, "%*sfs:\n", indent, "");
        segment_dump(stream, &sregs->fs, indent + 2);
        fprintf(stream, "%*sgs:\n", indent, "");
        segment_dump(stream, &sregs->gs, indent + 2);
        fprintf(stream, "%*sss:\n", indent, "");
        segment_dump(stream, &sregs->ss, indent + 2);
        fprintf(stream, "%*str:\n", indent, "");
        segment_dump(stream, &sregs->tr, indent + 2);
        fprintf(stream, "%*sldt:\n", indent, "");
        segment_dump(stream, &sregs->ldt, indent + 2);

        fprintf(stream, "%*sgdt:\n", indent, "");
        dtable_dump(stream, &sregs->gdt, indent + 2);
        fprintf(stream, "%*sidt:\n", indent, "");
        dtable_dump(stream, &sregs->idt, indent + 2);

        fprintf(stream, "%*scr0: 0x%.16llx cr2: 0x%.16llx "
                "cr3: 0x%.16llx cr4: 0x%.16llx\n",
                indent, "",
                sregs->cr0, sregs->cr2, sregs->cr3, sregs->cr4);
        fprintf(stream, "%*scr8: 0x%.16llx efer: 0x%.16llx "
                "apic_base: 0x%.16llx\n",
                indent, "",
                sregs->cr8, sregs->efer, sregs->apic_base);

        fprintf(stream, "%*sinterrupt_bitmap:\n", indent, "");
        for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) {
                fprintf(stream, "%*s%.16llx\n", indent + 2, "",
                        sregs->interrupt_bitmap[i]);
        }
}
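
/*
 * Example usage (illustrative sketch, not part of the original
 * source): dump the system registers of vCPU 0 to stderr, using
 * vcpu_sregs_get() from this library.
 *
 *      struct kvm_sregs sregs;
 *
 *      vcpu_sregs_get(vm, 0, &sregs);
 *      fprintf(stderr, "sregs:\n");
 *      sregs_dump(stderr, &sregs, 2);
 */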

void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
{
        TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
                "unknown or unsupported guest mode, mode: 0x%x", vm->mode);

        /* If needed, create page map l4 table. */
        if (!vm->pgd_created) {
                vm_paddr_t paddr = vm_phy_page_alloc(vm,
                        KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
                vm->pgd = paddr;
                vm->pgd_created = true;
        }
}

/* VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by vm, creates a virtual translation for the page
 * starting at vaddr to the page starting at paddr.
 */
void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
        uint32_t pgd_memslot)
{
        uint16_t index[4];
        struct pageMapL4Entry *pml4e;

        TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
                "unknown or unsupported guest mode, mode: 0x%x", vm->mode);

        TEST_ASSERT((vaddr % vm->page_size) == 0,
                "Virtual address not on page boundary,\n"
                "  vaddr: 0x%lx vm->page_size: 0x%x",
                vaddr, vm->page_size);
        TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
                (vaddr >> vm->page_shift)),
                "Invalid virtual address, vaddr: 0x%lx",
                vaddr);
        TEST_ASSERT((paddr % vm->page_size) == 0,
                "Physical address not on page boundary,\n"
                "  paddr: 0x%lx vm->page_size: 0x%x",
                paddr, vm->page_size);
        TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
                "Physical address beyond maximum supported,\n"
                "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                paddr, vm->max_gfn, vm->page_size);

        index[0] = (vaddr >> 12) & 0x1ffu;
        index[1] = (vaddr >> 21) & 0x1ffu;
        index[2] = (vaddr >> 30) & 0x1ffu;
        index[3] = (vaddr >> 39) & 0x1ffu;

        /* Allocate page directory pointer table if not present. */
        pml4e = addr_gpa2hva(vm, vm->pgd);
        if (!pml4e[index[3]].present) {
                pml4e[index[3]].address = vm_phy_page_alloc(vm,
                        KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
                        >> vm->page_shift;
                pml4e[index[3]].writable = true;
                pml4e[index[3]].present = true;
        }

        /* Allocate page directory table if not present. */
        struct pageDirectoryPointerEntry *pdpe;
        pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
        if (!pdpe[index[2]].present) {
                pdpe[index[2]].address = vm_phy_page_alloc(vm,
                        KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
                        >> vm->page_shift;
                pdpe[index[2]].writable = true;
                pdpe[index[2]].present = true;
        }

        /* Allocate page table if not present. */
        struct pageDirectoryEntry *pde;
        pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
        if (!pde[index[1]].present) {
                pde[index[1]].address = vm_phy_page_alloc(vm,
                        KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
                        >> vm->page_shift;
                pde[index[1]].writable = true;
                pde[index[1]].present = true;
        }

        /* Fill in page table entry. */
        struct pageTableEntry *pte;
        pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
        pte[index[0]].address = paddr >> vm->page_shift;
        pte[index[0]].writable = true;
        pte[index[0]].present = true;
}
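
/*
 * Example usage (illustrative sketch, not part of the original
 * source): identity-map one freshly allocated page in memslot 0,
 * creating the PML4 first.
 *
 *      vm_paddr_t paddr = vm_phy_page_alloc(vm,
 *              KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
 *
 *      virt_pgd_alloc(vm, 0);
 *      virt_pg_map(vm, paddr, paddr, 0);
 */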

/* Virtual Translation Tables Dump
 *
 * Input Args:
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps to the FILE stream given by stream, the contents of all the
 * virtual translation tables for the VM given by vm.
 */
void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
        struct pageMapL4Entry *pml4e, *pml4e_start;
        struct pageDirectoryPointerEntry *pdpe, *pdpe_start;
        struct pageDirectoryEntry *pde, *pde_start;
        struct pageTableEntry *pte, *pte_start;

        if (!vm->pgd_created)
                return;

        fprintf(stream, "%*s                                          "
                "                no\n", indent, "");
        fprintf(stream, "%*s      index hvaddr         gpaddr         "
                "addr         w exec dirty\n",
                indent, "");
        pml4e_start = (struct pageMapL4Entry *) addr_gpa2hva(vm,
                vm->pgd);
        for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
                pml4e = &pml4e_start[n1];
                if (!pml4e->present)
                        continue;
                fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10lx %u "
                        " %u\n",
                        indent, "",
                        pml4e - pml4e_start, pml4e,
                        addr_hva2gpa(vm, pml4e), (uint64_t) pml4e->address,
                        pml4e->writable, pml4e->execute_disable);

                pdpe_start = addr_gpa2hva(vm, pml4e->address
                        * vm->page_size);
                for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
                        pdpe = &pdpe_start[n2];
                        if (!pdpe->present)
                                continue;
                        fprintf(stream, "%*spdpe  0x%-3zx %p 0x%-12lx 0x%-10lx "
                                "%u  %u\n",
                                indent, "",
                                pdpe - pdpe_start, pdpe,
                                addr_hva2gpa(vm, pdpe),
                                (uint64_t) pdpe->address, pdpe->writable,
                                pdpe->execute_disable);

                        pde_start = addr_gpa2hva(vm,
                                pdpe->address * vm->page_size);
                        for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
                                pde = &pde_start[n3];
                                if (!pde->present)
                                        continue;
                                fprintf(stream, "%*spde   0x%-3zx %p "
                                        "0x%-12lx 0x%-10lx %u  %u\n",
                                        indent, "", pde - pde_start, pde,
                                        addr_hva2gpa(vm, pde),
                                        (uint64_t) pde->address, pde->writable,
                                        pde->execute_disable);

                                pte_start = addr_gpa2hva(vm,
                                        pde->address * vm->page_size);
                                for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
                                        pte = &pte_start[n4];
                                        if (!pte->present)
                                                continue;
                                        fprintf(stream, "%*spte   0x%-3zx %p "
                                                "0x%-12lx 0x%-10lx %u  %u "
                                                "    %u    0x%-10lx\n",
                                                indent, "",
                                                pte - pte_start, pte,
                                                addr_hva2gpa(vm, pte),
                                                (uint64_t) pte->address,
                                                pte->writable,
                                                pte->execute_disable,
                                                pte->dirty,
                                                ((uint64_t) n1 << 27)
                                                        | ((uint64_t) n2 << 18)
                                                        | ((uint64_t) n3 << 9)
                                                        | ((uint64_t) n4));
                                }
                        }
                }
        }
}

/* Set Unusable Segment
 *
 * Input Args: None
 *
 * Output Args:
 *   segp - Pointer to segment register
 *
 * Return: None
 *
 * Sets the segment register pointed to by segp to an unusable state.
 */
static void kvm_seg_set_unusable(struct kvm_segment *segp)
{
        memset(segp, 0, sizeof(*segp));
        segp->unusable = true;
}

static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
{
        void *gdt = addr_gva2hva(vm, vm->gdt);
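        /*
         * Bits 3 and up of a selector index the GDT; each descriptor
         * slot is 8 bytes, so (selector >> 3) * 8 is its byte offset.
         */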
        struct desc64 *desc = gdt + (segp->selector >> 3) * 8;

        desc->limit0 = segp->limit & 0xFFFF;
        desc->base0 = segp->base & 0xFFFF;
        desc->base1 = segp->base >> 16;
        desc->type = segp->type;
        desc->s = segp->s;
        desc->dpl = segp->dpl;
        desc->p = segp->present;
        desc->limit1 = segp->limit >> 16;
        desc->avl = segp->avl;
        desc->l = segp->l;
        desc->db = segp->db;
        desc->g = segp->g;
        desc->base2 = segp->base >> 24;
        if (!segp->s)
                desc->base3 = segp->base >> 32;
}

/* Set Long Mode Flat Kernel Code Segment
 *
 * Input Args:
 *   vm - VM whose GDT is being filled, or NULL to only write segp
 *   selector - selector value
 *
 * Output Args:
 *   segp - Pointer to KVM segment
 *
 * Return: None
 *
 * Sets up the KVM segment pointed to by segp, to be a code segment
 * with the selector value given by selector.
 */
static void kvm_seg_set_kernel_code_64bit(struct kvm_vm *vm, uint16_t selector,
        struct kvm_segment *segp)
{
        memset(segp, 0, sizeof(*segp));
        segp->selector = selector;
        segp->limit = 0xFFFFFFFFu;
        segp->s = 0x1; /* kTypeCodeData */
        segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed
                                          * | kFlagCodeReadable
                                          */
        segp->g = true;
        segp->l = true;
        segp->present = true;
        if (vm)
                kvm_seg_fill_gdt_64bit(vm, segp);
}

/* Set Long Mode Flat Kernel Data Segment
 *
 * Input Args:
 *   vm - VM whose GDT is being filled, or NULL to only write segp
 *   selector - selector value
 *
 * Output Args:
 *   segp - Pointer to KVM segment
 *
 * Return: None
 *
 * Sets up the KVM segment pointed to by segp, to be a data segment
 * with the selector value given by selector.
 */
static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
        struct kvm_segment *segp)
{
        memset(segp, 0, sizeof(*segp));
        segp->selector = selector;
        segp->limit = 0xFFFFFFFFu;
        segp->s = 0x1; /* kTypeCodeData */
        segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed
                                          * | kFlagDataWritable
                                          */
        segp->g = true;
        segp->present = true;
        if (vm)
                kvm_seg_fill_gdt_64bit(vm, segp);
}

/* Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Translates the VM virtual address given by gva to a VM physical
 * address, within the VM given by vm, by walking the virtual
 * translation tables.  A TEST_ASSERT failure occurs if no mapping
 * exists for the VM virtual address.
 */
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
        uint16_t index[4];
        struct pageMapL4Entry *pml4e;
        struct pageDirectoryPointerEntry *pdpe;
        struct pageDirectoryEntry *pde;
        struct pageTableEntry *pte;

        TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
                "unknown or unsupported guest mode, mode: 0x%x", vm->mode);

        index[0] = (gva >> 12) & 0x1ffu;
        index[1] = (gva >> 21) & 0x1ffu;
        index[2] = (gva >> 30) & 0x1ffu;
        index[3] = (gva >> 39) & 0x1ffu;

        if (!vm->pgd_created)
                goto unmapped_gva;
        pml4e = addr_gpa2hva(vm, vm->pgd);
        if (!pml4e[index[3]].present)
                goto unmapped_gva;

        pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
        if (!pdpe[index[2]].present)
                goto unmapped_gva;

        pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
        if (!pde[index[1]].present)
                goto unmapped_gva;

        pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
        if (!pte[index[0]].present)
                goto unmapped_gva;

        return (pte[index[0]].address * vm->page_size) + (gva & 0xfffu);

unmapped_gva:
        TEST_ASSERT(false, "No mapping for vm virtual address, "
                    "gva: 0x%lx", gva);
}
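
/*
 * Example usage (illustrative sketch, not part of the original
 * source): confirm a translation created by virt_pg_map().
 *
 *      virt_pg_map(vm, gva, gpa, 0);
 *      TEST_ASSERT(addr_gva2gpa(vm, gva) == gpa,
 *                  "Unexpected gva -> gpa translation");
 */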

static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt, int gdt_memslot,
                          int pgd_memslot)
{
        if (!vm->gdt)
                vm->gdt = vm_vaddr_alloc(vm, getpagesize(),
                        KVM_UTIL_MIN_VADDR, gdt_memslot, pgd_memslot);

        dt->base = vm->gdt;
        dt->limit = getpagesize();
}

static void kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp,
                                int selector, int gdt_memslot,
                                int pgd_memslot)
{
        if (!vm->tss)
                vm->tss = vm_vaddr_alloc(vm, getpagesize(),
                        KVM_UTIL_MIN_VADDR, gdt_memslot, pgd_memslot);

        memset(segp, 0, sizeof(*segp));
        segp->base = vm->tss;
        segp->limit = 0x67;
        segp->selector = selector;
        segp->type = 0xb;
        segp->present = 1;
        kvm_seg_fill_gdt_64bit(vm, segp);
}

void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot)
{
        struct kvm_sregs sregs;

        /* Set mode specific system register values. */
        vcpu_sregs_get(vm, vcpuid, &sregs);

        sregs.idt.limit = 0;

        kvm_setup_gdt(vm, &sregs.gdt, gdt_memslot, pgd_memslot);

        switch (vm->mode) {
        case VM_MODE_FLAT48PG:
                sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
                sregs.cr4 |= X86_CR4_PAE;
                sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);

                kvm_seg_set_unusable(&sregs.ldt);
                kvm_seg_set_kernel_code_64bit(vm, 0x8, &sregs.cs);
                kvm_seg_set_kernel_data_64bit(vm, 0x10, &sregs.ds);
                kvm_seg_set_kernel_data_64bit(vm, 0x10, &sregs.es);
                kvm_setup_tss_64bit(vm, &sregs.tr, 0x18, gdt_memslot, pgd_memslot);
                break;

        default:
                TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", vm->mode);
        }

        sregs.cr3 = vm->pgd;
        vcpu_sregs_set(vm, vcpuid, &sregs);
}

/* Adds a vCPU with reasonable defaults (e.g., a stack and entry point)
 *
 * Input Args:
 *   vcpuid - The id of the VCPU to add to the VM.
 *   guest_code - The vCPU's entry point
 */
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
        struct kvm_mp_state mp_state;
        struct kvm_regs regs;
        vm_vaddr_t stack_vaddr;

        stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
                                     DEFAULT_GUEST_STACK_VADDR_MIN, 0, 0);

        /* Create VCPU */
        vm_vcpu_add(vm, vcpuid, 0, 0);

        /* Setup guest general purpose registers */
        vcpu_regs_get(vm, vcpuid, &regs);
        regs.rflags = regs.rflags | 0x2;
        regs.rsp = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize());
        regs.rip = (unsigned long) guest_code;
        vcpu_regs_set(vm, vcpuid, &regs);

        /* Setup the MP state */
        mp_state.mp_state = 0;
        vcpu_set_mp_state(vm, vcpuid, &mp_state);
}
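
/*
 * Example usage (illustrative sketch, not part of the original
 * source; assumes the library's vcpu_run() helper): give an existing
 * VM a second vCPU with its own stack, then run it.
 *
 *      vm_vcpu_add_default(vm, 1, guest_code);
 *      vcpu_run(vm, 1);
 */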

/* VM VCPU CPUID Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU id
 *   cpuid - The CPUID values to set.
 *
 * Output Args: None
 *
 * Return: None
 *
 * Set the VCPU's CPUID.
 */
void vcpu_set_cpuid(struct kvm_vm *vm,
                uint32_t vcpuid, struct kvm_cpuid2 *cpuid)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int rc;

        TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

        rc = ioctl(vcpu->fd, KVM_SET_CPUID2, cpuid);
        TEST_ASSERT(rc == 0, "KVM_SET_CPUID2 failed, rc: %i errno: %i",
                    rc, errno);
}
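
/*
 * Example usage (illustrative sketch, not part of the original
 * source; assumes the library's kvm_get_supported_cpuid() helper):
 * mirror the host-supported CPUID into a vCPU.
 *
 *      struct kvm_cpuid2 *cpuid = kvm_get_supported_cpuid();
 *
 *      vcpu_set_cpuid(vm, vcpuid, cpuid);
 */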

/* Create a VM with reasonable defaults
 *
 * Input Args:
 *   vcpuid - The id of the single VCPU to add to the VM.
 *   extra_mem_pages - The number of extra pages of guest memory to
 *                     add (this decides how much extra space is
 *                     reserved for page tables, all in mem slot 0)
 *   guest_code - The vCPU's entry point
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 */
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
                                 void *guest_code)
{
        struct kvm_vm *vm;
        /*
         * For x86 the maximum page table size for a memory region
         * will be when only 4K pages are used.  In that case the
         * total extra size for page tables (for extra N pages) will
         * be: N/512+N/512^2+N/512^3+... which is definitely smaller
         * than N/512*2.
         */
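        /*
         * Worked illustration (not from the original source): with
         * N = 2^20 extra 4K pages (4 GiB of guest memory), the series
         * sums to 2048 + 4 + 1 = 2053 pages of page tables, while the
         * N/512*2 bound below reserves 4096 pages, comfortably more.
         */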
        uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;

        /* Create VM */
        vm = vm_create(VM_MODE_FLAT48PG,
                       DEFAULT_GUEST_PHY_PAGES + extra_pg_pages,
                       O_RDWR);

        /* Setup guest code */
        kvm_vm_elf_load(vm, program_invocation_name, 0, 0);

        /* Setup IRQ Chip */
        vm_create_irqchip(vm);

        /* Add the first vCPU. */
        vm_vcpu_add_default(vm, vcpuid, guest_code);

        return vm;
}
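
/*
 * Example usage (illustrative sketch, not part of the original
 * source; assumes the library's vcpu_run() and kvm_vm_free()
 * helpers): a typical test wraps a VM around its guest entry point
 * and runs vCPU 0.
 *
 *      static void guest_code(void)
 *      {
 *              /* guest-mode test body runs here *&#47;
 *      }
 *
 *      struct kvm_vm *vm = vm_create_default(0, 0, guest_code);
 *
 *      vcpu_run(vm, 0);
 *      kvm_vm_free(vm);
 */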

struct kvm_x86_state {
        struct kvm_vcpu_events events;
        struct kvm_mp_state mp_state;
        struct kvm_regs regs;
        struct kvm_xsave xsave;
        struct kvm_xcrs xcrs;
        struct kvm_sregs sregs;
        struct kvm_debugregs debugregs;
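        /*
         * nested_ pads the union so that the variable-size
         * struct kvm_nested_state returned by KVM_GET_NESTED_STATE
         * fits; 16384 bytes is this library's chosen upper bound,
         * validated against KVM_CAP_NESTED_STATE in vcpu_save_state().
         */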
        union {
                struct kvm_nested_state nested;
                char nested_[16384];
        };
        struct kvm_msrs msrs;
};

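/*
 * Queries how many MSR indices KVM can report: KVM_GET_MSR_INDEX_LIST
 * called with nmsrs = 0 is expected to fail with E2BIG while filling
 * in the required count.
 */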
static int kvm_get_num_msrs(struct kvm_vm *vm)
{
        struct kvm_msr_list nmsrs;
        int r;

        nmsrs.nmsrs = 0;
        r = ioctl(vm->kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
        TEST_ASSERT(r == -1 && errno == E2BIG, "Unexpected result from KVM_GET_MSR_INDEX_LIST probe, r: %i",
                r);

        return nmsrs.nmsrs;
}

struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        struct kvm_msr_list *list;
        struct kvm_x86_state *state;
        int nmsrs, r, i;
        static int nested_size = -1;

        if (nested_size == -1) {
                nested_size = kvm_check_cap(KVM_CAP_NESTED_STATE);
                TEST_ASSERT(nested_size <= sizeof(state->nested_),
                            "Nested state size too big, %i > %zi",
                            nested_size, sizeof(state->nested_));
        }

        nmsrs = kvm_get_num_msrs(vm);
        list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
        list->nmsrs = nmsrs;
        r = ioctl(vm->kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MSR_INDEX_LIST, r: %i",
                r);

        state = malloc(sizeof(*state) + nmsrs * sizeof(state->msrs.entries[0]));
        r = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, &state->events);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_VCPU_EVENTS, r: %i",
                r);

        r = ioctl(vcpu->fd, KVM_GET_MP_STATE, &state->mp_state);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MP_STATE, r: %i",
                r);

        r = ioctl(vcpu->fd, KVM_GET_REGS, &state->regs);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_REGS, r: %i",
                r);

        r = ioctl(vcpu->fd, KVM_GET_XSAVE, &state->xsave);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
                r);

        if (kvm_check_cap(KVM_CAP_XCRS)) {
                r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
                TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
                            r);
        }

        r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
                r);

        if (nested_size) {
                state->nested.size = sizeof(state->nested_);
                r = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, &state->nested);
                TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_NESTED_STATE, r: %i",
                        r);
                TEST_ASSERT(state->nested.size <= nested_size,
                        "Nested state size too big, %i (KVM_CHECK_CAP gave %i)",
                        state->nested.size, nested_size);
        } else
                state->nested.size = 0;

        state->msrs.nmsrs = nmsrs;
        for (i = 0; i < nmsrs; i++)
                state->msrs.entries[i].index = list->indices[i];
        r = ioctl(vcpu->fd, KVM_GET_MSRS, &state->msrs);
        TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed at %x)",
                r, r == nmsrs ? -1 : list->indices[r]);

        r = ioctl(vcpu->fd, KVM_GET_DEBUGREGS, &state->debugregs);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_DEBUGREGS, r: %i",
                r);

        free(list);
        return state;
}

void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *state)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int r;

        if (state->nested.size) {
                r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
                TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
                        r);
        }

        r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
                r);

        if (kvm_check_cap(KVM_CAP_XCRS)) {
                r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
                TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
                            r);
        }

        r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
                r);

        r = ioctl(vcpu->fd, KVM_SET_MSRS, &state->msrs);
        TEST_ASSERT(r == state->msrs.nmsrs, "Unexpected result from KVM_SET_MSRS, r: %i (failed at %x)",
                r, r == state->msrs.nmsrs ? -1 : state->msrs.entries[r].index);

        r = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, &state->events);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_VCPU_EVENTS, r: %i",
                r);

        r = ioctl(vcpu->fd, KVM_SET_MP_STATE, &state->mp_state);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_MP_STATE, r: %i",
                r);

        r = ioctl(vcpu->fd, KVM_SET_DEBUGREGS, &state->debugregs);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_DEBUGREGS, r: %i",
                r);

        r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i",
                r);
}
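
/*
 * Example usage (illustrative sketch, not part of the original
 * source): snapshot a vCPU and restore it later, e.g. in a
 * state-migration test.  vcpu_save_state() returns malloc'd memory,
 * so the caller frees it when done.
 *
 *      struct kvm_x86_state *state = vcpu_save_state(vm, 0);
 *
 *      vcpu_load_state(vm, 0, state);
 *      free(state);
 */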