arch/loongarch/kvm/exit.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>
#include "trace.h"

static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
{
	unsigned long val = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Per the LoongArch Reference Manual, Volume 1, Section 4.2.1,
	 * reads of an undefined CSR ID return 0.
	 */
	if (get_gcsr_flag(csrid) & SW_GCSR)
		val = kvm_read_sw_gcsr(csr, csrid);
	else
		pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return val;
}

static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		kvm_write_sw_gcsr(csr, csrid, val);
	} else {
		pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc);
	}

	return old;
}

static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
				unsigned long csr_mask, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

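	/*
	 * csrxchg only replaces the bits selected by csr_mask and returns
	 * the old value of those bits. For example, with old = 0xff00,
	 * csr_mask = 0x0ff0 and val = 0x1234, the CSR is written with
	 * (0xff00 & ~0x0ff0) | (0x1234 & 0x0ff0) = 0xf230, and
	 * 0xff00 & 0x0ff0 = 0x0f00 is returned.
	 */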
	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		val = (old & ~csr_mask) | (val & csr_mask);
		kvm_write_sw_gcsr(csr, csrid, val);
		old = old & csr_mask;
	} else {
		pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc);
	}

	return old;
}

static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
{
	unsigned int rd, rj, csrid;
	unsigned long csr_mask, val = 0;

	/*
	 * The rj field selects the CSR operation:
	 * rj == 0 means csrrd,
	 * rj == 1 means csrwr,
	 * any other rj means csrxchg, with rj naming the mask register.
	 */
	rd = inst.reg2csr_format.rd;
	rj = inst.reg2csr_format.rj;
	csrid = inst.reg2csr_format.csr;

	/* Process CSR ops */
	switch (rj) {
	case 0: /* process csrrd */
		val = kvm_emu_read_csr(vcpu, csrid);
		vcpu->arch.gprs[rd] = val;
		break;
	case 1: /* process csrwr */
		val = vcpu->arch.gprs[rd];
		val = kvm_emu_write_csr(vcpu, csrid, val);
		vcpu->arch.gprs[rd] = val;
		break;
	default: /* process csrxchg */
		val = vcpu->arch.gprs[rd];
		csr_mask = vcpu->arch.gprs[rj];
		val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
		vcpu->arch.gprs[rd] = val;
	}

	return EMULATE_DONE;
}

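/*
 * Emulate an IOCSR access: decode the width and direction from the
 * opcode, stash the operands in vcpu->run->iocsr_io and hand the access
 * to userspace as a KVM_EXIT_LOONGARCH_IOCSR exit.
 */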
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int ret;
	unsigned long val;
	u32 addr, rd, rj, opcode;

	/*
	 * The access width and direction of each IOCSR instruction are
	 * encoded in its opcode.
	 */
	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	opcode = inst.reg2_format.opcode;
	addr = vcpu->arch.gprs[rj];
	ret = EMULATE_DO_IOCSR;
	run->iocsr_io.phys_addr = addr;
	run->iocsr_io.is_write = 0;

	/* LoongArch is little endian */
	switch (opcode) {
	case iocsrrdb_op:
		run->iocsr_io.len = 1;
		break;
	case iocsrrdh_op:
		run->iocsr_io.len = 2;
		break;
	case iocsrrdw_op:
		run->iocsr_io.len = 4;
		break;
	case iocsrrdd_op:
		run->iocsr_io.len = 8;
		break;
	case iocsrwrb_op:
		run->iocsr_io.len = 1;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrh_op:
		run->iocsr_io.len = 2;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrw_op:
		run->iocsr_io.len = 4;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrd_op:
		run->iocsr_io.len = 8;
		run->iocsr_io.is_write = 1;
		break;
	default:
		ret = EMULATE_FAIL;
		break;
	}

	if (ret == EMULATE_DO_IOCSR) {
		if (run->iocsr_io.is_write) {
			val = vcpu->arch.gprs[rd];
			memcpy(run->iocsr_io.data, &val, run->iocsr_io.len);
		}
		vcpu->arch.io_gpr = rd;
	}

	return ret;
}

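/*
 * Complete an IOCSR read once userspace has filled run->iocsr_io.data:
 * the data is written back to the saved destination GPR, sign-extended
 * to the full register width.
 */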
int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	switch (run->iocsr_io.len) {
	case 1:
		*gpr = *(s8 *)run->iocsr_io.data;
		break;
	case 2:
		*gpr = *(s16 *)run->iocsr_io.data;
		break;
	case 4:
		*gpr = *(s32 *)run->iocsr_io.data;
		break;
	case 8:
		*gpr = *(s64 *)run->iocsr_io.data;
		break;
	default:
		kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n",
				run->iocsr_io.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

int kvm_emu_idle(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.idle_exits;
	trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);

	if (!kvm_arch_vcpu_runnable(vcpu)) {
		/*
		 * Switch to the software timer before halt-polling/blocking as
		 * the guest's timer may be a break event for the vCPU, and the
		 * hypervisor timer runs only when the CPU is in guest mode.
		 * Switch before halt-polling so that KVM recognizes an expired
		 * timer before blocking.
		 */
		kvm_save_timer(vcpu);
		kvm_vcpu_block(vcpu);
	}

	return EMULATE_DONE;
}

static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
	int rd, rj;
	unsigned int index;
	unsigned long curr_pc;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;

	/* Fetch the instruction */
	inst.word = vcpu->arch.badi;
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	trace_kvm_exit_gspr(vcpu, inst.word);
	er = EMULATE_FAIL;
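	/* Dispatch on the major opcode in bits [31:24] of the instruction */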
	switch (((inst.word >> 24) & 0xff)) {
	case 0x0: /* CPUCFG GSPR */
		if (inst.reg2_format.opcode == 0x1B) {
			rd = inst.reg2_format.rd;
			rj = inst.reg2_format.rj;
			++vcpu->stat.cpucfg_exits;
			index = vcpu->arch.gprs[rj];
			er = EMULATE_DONE;
			/*
			 * Per LoongArch Reference Manual 2.2.10.5, the
			 * return value is 0 for an undefined CPUCFG index.
			 */
			if (index < KVM_MAX_CPUCFG_REGS)
				vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
			else
				vcpu->arch.gprs[rd] = 0;
		}
		break;
	case 0x4: /* CSR{RD,WR,XCHG} GSPR */
		er = kvm_handle_csr(vcpu, inst);
		break;
	case 0x6: /* Cache, Idle and IOCSR GSPR */
		switch (((inst.word >> 22) & 0x3ff)) {
		case 0x18: /* Cache GSPR */
			er = EMULATE_DONE;
			trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE);
			break;
		case 0x19: /* Idle/IOCSR GSPR */
			switch (((inst.word >> 15) & 0x1ffff)) {
			case 0xc90: /* IOCSR GSPR */
				er = kvm_emu_iocsr(inst, run, vcpu);
				break;
			case 0xc91: /* Idle GSPR */
				er = kvm_emu_idle(vcpu);
				break;
			default:
				er = EMULATE_FAIL;
				break;
			}
			break;
		default:
			er = EMULATE_FAIL;
			break;
		}
		break;
	default:
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		kvm_arch_vcpu_dump_regs(vcpu);
		vcpu->arch.pc = curr_pc;
	}

	return er;
}

/*
 * A GSPR exception is triggered when the guest:
 * 1) executes the CPUCFG instruction;
 * 2) executes a CACOP/IDLE instruction;
 * 3) accesses an unimplemented CSR/IOCSR.
 */
static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
{
	int ret = RESUME_GUEST;
	enum emulation_result er = EMULATE_DONE;

	er = kvm_trap_handle_gspr(vcpu);

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else if (er == EMULATE_DO_IOCSR) {
		vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		ret = RESUME_GUEST;
	}

	return ret;
}

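/*
 * Emulate a guest load that faulted: decode the access width and
 * signedness from the instruction, then hand the access to userspace
 * as a KVM_EXIT_MMIO exit. The PC is advanced later, in
 * kvm_complete_mmio_read(), once the data has been supplied.
 */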
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int ret;
	unsigned int op8, opcode, rd;
	struct kvm_run *run = vcpu->run;

	run->mmio.phys_addr = vcpu->arch.badv;
	vcpu->mmio_needed = 2;	/* signed */
	op8 = (inst.word >> 24) & 0xff;
	ret = EMULATE_DO_MMIO;

	switch (op8) {
	case 0x24 ... 0x27:	/* ldptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case ldptrw_op:
			run->mmio.len = 4;
			break;
		case ldptrd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* ld.b/h/w/d, ld.bu/hu/wu process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;

		switch (opcode) {
		case ldb_op:
			run->mmio.len = 1;
			break;
		case ldbu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 1;
			break;
		case ldh_op:
			run->mmio.len = 2;
			break;
		case ldhu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 2;
			break;
		case ldw_op:
			run->mmio.len = 4;
			break;
		case ldwu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 4;
			break;
		case ldd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* ldx.b/h/w/d, ldx.bu/hu/wu process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case ldxb_op:
			run->mmio.len = 1;
			break;
		case ldxbu_op:
			run->mmio.len = 1;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxh_op:
			run->mmio.len = 2;
			break;
		case ldxhu_op:
			run->mmio.len = 2;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxw_op:
			run->mmio.len = 4;
			break;
		case ldxwu_op:
			run->mmio.len = 4;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		/* Set for kvm_complete_mmio_read() use */
		vcpu->arch.io_gpr = rd;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;
	} else {
		kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
			inst.word, vcpu->arch.pc, vcpu->arch.badv);
		kvm_arch_vcpu_dump_regs(vcpu);
		vcpu->mmio_needed = 0;
	}

	return ret;
}

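/*
 * Complete an MMIO read once userspace has filled run->mmio.data: advance
 * the PC past the load and write the data back to the saved destination
 * GPR, sign- or zero-extending it as recorded in vcpu->mmio_needed.
 */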
int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	/* Update with new PC */
	update_pc(&vcpu->arch);
	switch (run->mmio.len) {
	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s8 *)run->mmio.data;
		else
			*gpr = *(u8 *)run->mmio.data;
		break;
	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s16 *)run->mmio.data;
		else
			*gpr = *(u16 *)run->mmio.data;
		break;
	case 4:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s32 *)run->mmio.data;
		else
			*gpr = *(u32 *)run->mmio.data;
		break;
	case 8:
		*gpr = *(s64 *)run->mmio.data;
		break;
	default:
		kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
				run->mmio.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

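/*
 * Emulate a guest store that faulted: decode the access width from the
 * instruction, copy the source GPR into run->mmio.data and hand the
 * access to userspace as a KVM_EXIT_MMIO exit. Unlike the read path,
 * the PC is advanced up front and rolled back on failure.
 */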
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int ret;
	unsigned int rd, op8, opcode;
	unsigned long curr_pc, rd_val = 0;
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	op8 = (inst.word >> 24) & 0xff;
	run->mmio.phys_addr = vcpu->arch.badv;
	ret = EMULATE_DO_MMIO;
	switch (op8) {
	case 0x24 ... 0x27:	/* stptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case stptrw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stptrd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* st.b/h/w/d process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;
		rd_val = vcpu->arch.gprs[rd];

		switch (opcode) {
		case stb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = rd_val;
			break;
		case sth_op:
			run->mmio.len = 2;
			*(unsigned short *)data = rd_val;
			break;
		case stw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = rd_val;
			break;
		case std_op:
			run->mmio.len = 8;
			*(unsigned long *)data = rd_val;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* stx.b/h/w/d process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case stxb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = vcpu->arch.gprs[rd];
			break;
		case stxh_op:
			run->mmio.len = 2;
			*(unsigned short *)data = vcpu->arch.gprs[rd];
			break;
		case stxw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stxd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
	} else {
		/* Rollback PC if emulation was unsuccessful */
		vcpu->arch.pc = curr_pc;
		kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
			inst.word, vcpu->arch.pc, vcpu->arch.badv);
		kvm_arch_vcpu_dump_regs(vcpu);
	}

	return ret;
}

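/*
 * Common handler for guest read/write faults: try to resolve the fault
 * as an ordinary memory access first; only if that fails is the access
 * emulated as MMIO.
 */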
static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
{
	int ret;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;
	unsigned long badv = vcpu->arch.badv;

	ret = kvm_handle_mm_fault(vcpu, badv, write);
	if (ret) {
		/* Treat as MMIO */
		inst.word = vcpu->arch.badi;
		if (write) {
			er = kvm_emu_mmio_write(vcpu, inst);
		} else {
			/* A code fetch fault doesn't count as an MMIO */
			if (kvm_is_ifetch_fault(&vcpu->arch)) {
				kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF);
				return RESUME_GUEST;
			}

			er = kvm_emu_mmio_read(vcpu, inst);
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
		ret = RESUME_GUEST;
	}

	return ret;
}

static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, false);
}

static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, true);
}

/**
 * kvm_handle_fpu_disabled() - Guest used FPU while it is disabled in the host
 * @vcpu:	Virtual CPU context.
 *
 * Handle the case where the guest attempts to use the FPU before it has
 * been allowed by the root context.
 */
static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If the guest has no FPU, the FPU operation should have been
	 * treated as a reserved instruction!
	 * If the FPU is already in use, we shouldn't get this exit at all.
	 */
	if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
		kvm_err("%s internal error\n", __func__);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_fpu(vcpu);

	return RESUME_GUEST;
}

/*
 * LoongArch KVM fallback for guest exits that have no dedicated handler
 */
static int kvm_fault_ni(struct kvm_vcpu *vcpu)
{
	unsigned int ecode, inst;
	unsigned long estat, badv;

	/* Fetch the instruction */
	inst = vcpu->arch.badi;
	badv = vcpu->arch.badv;
	estat = vcpu->arch.host_estat;
	ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
	kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
			ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
	kvm_arch_vcpu_dump_regs(vcpu);
	kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

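/*
 * Exception dispatch table: maps each exception code below
 * EXCCODE_INT_START to its handler, defaulting to kvm_fault_ni() for
 * codes with no dedicated handler.
 */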
static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
	[0 ... EXCCODE_INT_START - 1]	= kvm_fault_ni,
	[EXCCODE_TLBI]			= kvm_handle_read_fault,
	[EXCCODE_TLBL]			= kvm_handle_read_fault,
	[EXCCODE_TLBS]			= kvm_handle_write_fault,
	[EXCCODE_TLBM]			= kvm_handle_write_fault,
	[EXCCODE_FPDIS]			= kvm_handle_fpu_disabled,
	[EXCCODE_GSPR]			= kvm_handle_gspr,
};

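/* Dispatch a guest exception to its handler, indexed by exception code */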
int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
	return kvm_fault_tables[fault](vcpu);
}