GNU Linux-libre 4.14.251-gnu1
virt/kvm/arm/mmio.c
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"

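/*
 * Store the register value @data into @buf in host byte order, going
 * through a store of the access width so the resulting memory image
 * matches what a native store of @len bytes would have produced.
 */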
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
{
        void *datap = NULL;
        union {
                u8      byte;
                u16     hword;
                u32     word;
                u64     dword;
        } tmp;

        switch (len) {
        case 1:
                tmp.byte        = data;
                datap           = &tmp.byte;
                break;
        case 2:
                tmp.hword       = data;
                datap           = &tmp.hword;
                break;
        case 4:
                tmp.word        = data;
                datap           = &tmp.word;
                break;
        case 8:
                tmp.dword       = data;
                datap           = &tmp.dword;
                break;
        }

        memcpy(buf, datap, len);
}

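/*
 * Read @len bytes from @buf and return them zero-extended to an unsigned
 * long, in host byte order. The multi-byte cases go through memcpy()
 * rather than dereferencing @buf through a wider pointer, which also
 * keeps the helper safe for buffers that are not naturally aligned.
 */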
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
{
        unsigned long data = 0;
        union {
                u16     hword;
                u32     word;
                u64     dword;
        } tmp;

        switch (len) {
        case 1:
                data = *(u8 *)buf;
                break;
        case 2:
                memcpy(&tmp.hword, buf, len);
                data = tmp.hword;
                break;
        case 4:
                memcpy(&tmp.word, buf, len);
                data = tmp.word;
                break;
        case 8:
                memcpy(&tmp.dword, buf, len);
                data = tmp.dword;
                break;
        }

        return data;
}

/**
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 *                           or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        unsigned long data;
        unsigned int len;
        int mask;

        /* Detect an already handled MMIO return */
        if (unlikely(!vcpu->mmio_needed))
                return 0;

        vcpu->mmio_needed = 0;

        if (!run->mmio.is_write) {
                len = run->mmio.len;
                if (len > sizeof(unsigned long))
                        return -EINVAL;

                data = kvm_mmio_read_buf(run->mmio.data, len);

                if (vcpu->arch.mmio_decode.sign_extend &&
                    len < sizeof(unsigned long)) {
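                        /*
                         * Sign-extend with the (x ^ m) - m identity, where
                         * m is the sign-bit mask for the access width. For
                         * example, a 1-byte load of 0x80 gives mask = 0x80,
                         * and (0x80 ^ 0x80) - 0x80 yields 0xffffffffffffff80,
                         * i.e. -128 extended to the full register width.
                         */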
                        mask = 1U << ((len * 8) - 1);
                        data = (data ^ mask) - mask;
                }

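                /*
                 * If the SF bit of the fault syndrome was clear, the
                 * destination is a 32-bit register, so keep only the low
                 * 32 bits of the result.
                 */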
                if (!vcpu->arch.mmio_decode.sixty_four)
                        data = data & 0xffffffff;

                trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
                               &data);
                data = vcpu_data_host_to_guest(vcpu, data, len);
                vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
        }

        /*
         * The MMIO instruction is emulated and should not be re-executed
         * in the guest.
         */
        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

        return 0;
}

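/*
 * Decode the ISV-valid fault syndrome into the fields needed to emulate
 * the access: direction, access size, sign extension and the target
 * register. The decoded values are cached in vcpu->arch.mmio_decode so
 * that kvm_handle_mmio_return() can complete the load later.
 *
 * Returns 0 on success, 1 if the fault has already been handled by
 * injecting an abort into the guest, or a negative error code.
 */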
static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
{
        unsigned long rt;
        int access_size;
        bool sign_extend;
        bool sixty_four;

        if (kvm_vcpu_dabt_iss1tw(vcpu)) {
                /* page table accesses IO mem: tell guest to fix its TTBR */
                kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
                return 1;
        }

        access_size = kvm_vcpu_dabt_get_as(vcpu);
        if (unlikely(access_size < 0))
                return access_size;

        *is_write = kvm_vcpu_dabt_iswrite(vcpu);
        sign_extend = kvm_vcpu_dabt_issext(vcpu);
        sixty_four = kvm_vcpu_dabt_issf(vcpu);
        rt = kvm_vcpu_dabt_get_rd(vcpu);

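        /* Cache the decode so kvm_handle_mmio_return() can finish the load. */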
        *len = access_size;
        vcpu->arch.mmio_decode.sign_extend = sign_extend;
        vcpu->arch.mmio_decode.rt = rt;
        vcpu->arch.mmio_decode.sixty_four = sixty_four;

        return 0;
}

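/**
 * io_mem_abort -- Handle a guest data abort that targets an MMIO address
 *
 * @vcpu:      The VCPU pointer
 * @run:       The VCPU run struct to prepare for a possible userspace exit
 * @fault_ipa: The faulting guest physical (intermediate physical) address
 *
 * Returns 1 if the access was handled (in kernel, or by injecting a fault
 * back into the guest) and the guest can be resumed, 0 if the access must
 * be completed by user space via a KVM_EXIT_MMIO exit, or a negative error
 * code.
 */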
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                 phys_addr_t fault_ipa)
{
        unsigned long data;
        unsigned long rt;
        int ret;
        bool is_write;
        int len;
        u8 data_buf[8];

        /*
         * Prepare the MMIO operation. First decode the syndrome data we
         * get from the CPU. Then check whether some in-kernel emulation
         * is responsible for it; otherwise let user space do its magic.
         */
        if (kvm_vcpu_dabt_isvalid(vcpu)) {
                ret = decode_hsr(vcpu, &is_write, &len);
                if (ret)
                        return ret;
        } else {
                kvm_err("load/store instruction decoding not implemented\n");
                return -ENOSYS;
        }

        rt = vcpu->arch.mmio_decode.rt;

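        /*
         * Let any in-kernel device registered on KVM_MMIO_BUS (e.g. the
         * VGIC) try to handle the access first. A non-zero return from the
         * bus means nothing claimed the address and we have to exit to
         * user space instead.
         */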
        if (is_write) {
                data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
                                               len);

                trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
                kvm_mmio_write_buf(data_buf, len, data);

                ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                                       data_buf);
        } else {
                trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
                               fault_ipa, NULL);

                ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                                      data_buf);
        }

        /* Now prepare kvm_run for the potential return to userland. */
        run->mmio.is_write      = is_write;
        run->mmio.phys_addr     = fault_ipa;
        run->mmio.len           = len;
        vcpu->mmio_needed       = 1;

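        /*
         * kvm_run is filled in unconditionally so that both the in-kernel
         * path below and a later userspace completion go through the same
         * kvm_handle_mmio_return() logic.
         */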
        if (!ret) {
                /* We handled the access successfully in the kernel. */
                if (!is_write)
                        memcpy(run->mmio.data, data_buf, len);
                vcpu->stat.mmio_exit_kernel++;
                kvm_handle_mmio_return(vcpu, run);
                return 1;
        }

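        /*
         * No in-kernel device claimed the access: exit to user space. For a
         * write, the data to be stored goes out in run->mmio.data; for a
         * read, user space fills run->mmio.data and the load is completed
         * by kvm_handle_mmio_return() on the next KVM_RUN.
         */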
        if (is_write)
                memcpy(run->mmio.data, data_buf, len);
        vcpu->stat.mmio_exit_user++;
        run->exit_reason        = KVM_EXIT_MMIO;
        return 0;
}