// SPDX-License-Identifier: GPL-2.0
/*
 * Hosting Protected Virtual Machines
 *
 * Copyright IBM Corp. 2019, 2020
 *    Author(s): Janosch Frank <frankja@linux.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/minmax.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <asm/gmap.h>
#include <asm/uv.h>
#include <asm/mman.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include "kvm-s390.h"
static void kvm_s390_clear_pv_state(struct kvm *kvm)
{
	kvm->arch.pv.handle = 0;
	kvm->arch.pv.guest_len = 0;
	kvm->arch.pv.stor_base = 0;
	kvm->arch.pv.stor_var = NULL;
}
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	int cc;

	if (!kvm_s390_pv_cpu_get_handle(vcpu))
		return 0;

	cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), UVC_CMD_DESTROY_SEC_CPU, rc, rrc);

	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
		     vcpu->vcpu_id, *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", *rc, *rrc);

	/* Intended memory leak for something that should never happen. */
	if (!cc)
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));

	free_page(sida_origin(vcpu->arch.sie_block));
	vcpu->arch.sie_block->pv_handle_cpu = 0;
	vcpu->arch.sie_block->pv_handle_config = 0;
	memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
	vcpu->arch.sie_block->sdf = 0;
	/*
	 * The sidad field (for sdf == 2) is now the gbea field (for sdf == 0).
	 * Use the reset value of gbea to avoid leaking the kernel pointer of
	 * the just freed sida.
	 */
	vcpu->arch.sie_block->gbea = 1;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

	return cc ? EIO : 0;
}
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	struct uv_cb_csc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CPU,
		.header.len = sizeof(uvcb),
	};
	int cc;

	if (kvm_s390_pv_cpu_get_handle(vcpu))
		return -EINVAL;

	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
						   get_order(uv_info.guest_cpu_stor_len));
	if (!vcpu->arch.pv.stor_base)
		return -ENOMEM;

	/* Input */
	uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
	uvcb.num = vcpu->arch.sie_block->icpua;
	uvcb.state_origin = (u64)vcpu->arch.sie_block;
	uvcb.stor_origin = (u64)vcpu->arch.pv.stor_base;

	/* Alloc Secure Instruction Data Area Designation */
	vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!vcpu->arch.sie_block->sidad) {
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));
		return -ENOMEM;
	}

	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(vcpu->kvm, 3,
		     "PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
		     vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
		     uvcb.header.rrc);

	if (cc) {
		u16 dummy;

		kvm_s390_pv_destroy_cpu(vcpu, &dummy, &dummy);
		return -EIO;
	}

	/* Output */
	vcpu->arch.pv.handle = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
	vcpu->arch.sie_block->sdf = 2;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}
/* only free resources when the destroy was successful */
static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
{
	vfree(kvm->arch.pv.stor_var);
	free_pages(kvm->arch.pv.stor_base,
		   get_order(uv_info.guest_base_stor_len));
	kvm_s390_clear_pv_state(kvm);
}
static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
{
	unsigned long base = uv_info.guest_base_stor_len;
	unsigned long virt = uv_info.guest_virt_var_stor_len;
	unsigned long npages = 0, vlen = 0;

	kvm->arch.pv.stor_var = NULL;
	kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
	if (!kvm->arch.pv.stor_base)
		return -ENOMEM;

	/*
	 * Calculate current guest storage for allocation of the
	 * variable storage, which is based on the length in MB.
	 *
	 * Slots are sorted by GFN.
	 */
	mutex_lock(&kvm->slots_lock);
	npages = kvm_s390_get_gfn_end(kvm_memslots(kvm));
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.pv.guest_len = npages * PAGE_SIZE;

	/* Allocate variable storage */
	vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
	vlen += uv_info.guest_virt_base_stor_len;
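	/*
	 * Worked example (illustrative numbers only; the real values come
	 * from the UV query): with guest_virt_var_stor_len = 512 bytes per
	 * 1MB (HPAGE_SIZE) chunk of guest storage, a 4GB guest
	 * (npages * PAGE_SIZE = 4GB, i.e. 4096 chunks) yields
	 * vlen = ALIGN(512 * 4096, PAGE_SIZE) = 2MB, plus the fixed
	 * guest_virt_base_stor_len overhead.
	 */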
	kvm->arch.pv.stor_var = vzalloc(vlen);
	if (!kvm->arch.pv.stor_var)
		goto out_err;
	return 0;

out_err:
	kvm_s390_pv_dealloc_vm(kvm);
	return -ENOMEM;
}
/* this should not fail, but if it does, we must not free the donated memory */
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	int cc;

	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	/*
	 * If the mm still has a mapping, make all its pages accessible
	 * before destroying the guest.
	 */
	if (mmget_not_zero(kvm->mm)) {
		s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE);
		mmput(kvm->mm);
	}

	if (!cc) {
		atomic_dec(&kvm->mm->context.protected_count);
		kvm_s390_pv_dealloc_vm(kvm);
	} else {
		/* Intended memory leak on "impossible" error */
		s390_replace_asce(kvm->arch.gmap);
	}
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);

	return cc ? -EIO : 0;
}
static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
					     struct mm_struct *mm)
{
	struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier);
	u16 dummy;

	/*
	 * No locking is needed since this is the last thread of the last user of this
	 * struct kvm.
	 * When the struct kvm gets deinitialized, this notifier is also
	 * unregistered. This means that if this notifier runs, then the
	 * struct kvm is still valid.
	 */
	kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
}

static const struct mmu_notifier_ops kvm_s390_pv_mmu_notifier_ops = {
	.release = kvm_s390_pv_mmu_notifier_release,
};
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_cgc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CONF,
		.header.len = sizeof(uvcb)
	};
	int cc, ret;
	u16 dummy;

	ret = kvm_s390_pv_alloc_vm(kvm);
	if (ret)
		return ret;

	/* Inputs */
	uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
	uvcb.guest_stor_len = kvm->arch.pv.guest_len;
	uvcb.guest_asce = kvm->arch.gmap->asce;
	uvcb.guest_sca = (unsigned long)kvm->arch.sca;
	uvcb.conf_base_stor_origin = (u64)kvm->arch.pv.stor_base;
	uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;

	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x",
		     uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc);

	/* Outputs */
	kvm->arch.pv.handle = uvcb.guest_handle;

	atomic_inc(&kvm->mm->context.protected_count);
	if (cc) {
		if (uvcb.header.rc & UVC_RC_NEED_DESTROY) {
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
		} else {
			atomic_dec(&kvm->mm->context.protected_count);
			kvm_s390_pv_dealloc_vm(kvm);
		}
		return -EIO;
	}
	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
	/* Add the notifier only once. No races because we hold kvm->lock */
	if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
		kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
		mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
	}
	return 0;
}
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc)
{
	struct uv_cb_ssc uvcb = {
		.header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
		.header.len = sizeof(uvcb),
		.sec_header_origin = (u64)hdr,
		.sec_header_len = length,
		.guest_handle = kvm_s390_pv_get_handle(kvm),
	};
	int cc = uv_call(0, (u64)&uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
		     *rc, *rrc);
	return cc ? -EINVAL : 0;
}
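/*
 * For context (a sketch, not part of the UV interface itself): userspace
 * drives these steps through the KVM_S390_PV_COMMAND ioctl. The usual
 * bring-up order, as documented in Documentation/virt/kvm/api.rst, is
 * roughly:
 *
 *	KVM_PV_ENABLE		-> kvm_s390_pv_init_vm() and conversion
 *				   of the existing vcpus
 *	KVM_PV_SET_SEC_PARMS	-> kvm_s390_pv_set_sec_parms()
 *	KVM_PV_UNPACK		-> kvm_s390_pv_unpack(), once per image
 *				   component
 *	KVM_PV_VERIFY		-> image verification; afterwards the
 *				   guest may run
 */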
static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
		      u64 offset, u16 *rc, u16 *rrc)
{
	struct uv_cb_unp uvcb = {
		.header.cmd = UVC_CMD_UNPACK_IMG,
		.header.len = sizeof(uvcb),
		.guest_handle = kvm_s390_pv_get_handle(kvm),
		.gaddr = addr,
		.tweak[0] = tweak,
		.tweak[1] = offset,
	};
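	/*
	 * Note on the tweak (an inference from the structure above; see the
	 * Ultravisor documentation for the authoritative definition): the
	 * caller-provided tweak identifies the image component, while the
	 * page offset within the component is passed as tweak[1], so each
	 * encrypted page is unpacked with a unique tweak value.
	 */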
	int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;

	if (ret && ret != -EAGAIN)
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
			     uvcb.gaddr, *rc, *rrc);
	return ret;
}
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc)
{
	u64 offset = 0;
	int ret = 0;

	if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
		return -EINVAL;

	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
		     addr, size);

	while (offset < size) {
		ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
		if (ret == -EAGAIN) {
			cond_resched();
			if (fatal_signal_pending(current))
				break;
			continue;
		}
		if (ret)
			break;
		addr += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	if (!ret)
		KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
	return ret;
}
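/*
 * A minimal usage sketch (hypothetical values, assuming kvm->lock is held
 * and the VM is already a secure configuration): unpacking one 16-page
 * image component that was packed with tweak 0x1000 at guest address
 * 0x100000 is a single call:
 *
 *	u16 rc, rrc;
 *	int ret = kvm_s390_pv_unpack(kvm, 0x100000, 16 * PAGE_SIZE,
 *				     0x1000, &rc, &rrc);
 *
 * The loop above then issues one UVC_CMD_UNPACK_IMG per page.
 */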
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
{
	struct uv_cb_cpu_set_state uvcb = {
		.header.cmd	= UVC_CMD_CPU_SET_STATE,
		.header.len	= sizeof(uvcb),
		.cpu_handle	= kvm_s390_pv_cpu_get_handle(vcpu),
		.state		= state,
	};
	int cc;

	cc = uv_call(0, (u64)&uvcb);
	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
		     vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
	if (cc)
		return -EINVAL;
	return 0;
}
int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_cpu uvcb = {
		.header.cmd = UVC_CMD_DUMP_CPU,
		.header.len = sizeof(uvcb),
		.cpu_handle = vcpu->arch.pv.handle,
		.dump_area_origin = (u64)buff,
	};
	int cc;

	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	return cc;
}
/* Size of the cache for the storage state dump data. 1MB for now */
#define DUMP_BUFF_LEN HPAGE_SIZE

/**
 * kvm_s390_pv_dump_stor_state
 *
 * @kvm: pointer to the guest's KVM struct
 * @buff_user: Userspace pointer where we will write the results to
 * @gaddr: Starting absolute guest address for which the storage state
 *	   is requested
 * @buff_user_len: Length of the buff_user buffer
 * @rc: Pointer to where the uvcb return code is stored
 * @rrc: Pointer to where the uvcb return reason code is stored
 *
 * Stores buff_user_len bytes of tweak component values to buff_user
 * starting with the 1MB block specified by the absolute guest address
 * (gaddr). The gaddr pointer will be updated with the last address
 * for which data was written when returning to userspace. buff_user
 * might be written to even if an error rc is returned, for instance
 * if we encounter a fault after writing the first page of data.
 *
 * Context: kvm->lock needs to be held
 *
 * Return:
 *  0 on success
 *  -ENOMEM if allocating the cache fails
 *  -EINVAL if gaddr is not aligned to 1MB
 *  -EINVAL if buff_user_len is not aligned to uv_info.conf_dump_storage_state_len
 *  -EINVAL if the UV call fails, rc and rrc will be set in this case
 *  -EFAULT if copying the result to buff_user failed
 */
int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
				u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_stor_state uvcb = {
		.header.cmd = UVC_CMD_DUMP_CONF_STOR_STATE,
		.header.len = sizeof(uvcb),
		.config_handle = kvm->arch.pv.handle,
		.gaddr = *gaddr,
		.dump_area_origin = 0,
	};
	const u64 increment_len = uv_info.conf_dump_storage_state_len;
	size_t buff_kvm_size;
	size_t size_done = 0;
	u8 *buff_kvm = NULL;
	int cc, ret;

	ret = -EINVAL;
	/* UV call processes 1MB guest storage chunks at a time */
	if (!IS_ALIGNED(*gaddr, HPAGE_SIZE))
		goto out;

	/*
	 * We provide the storage state for 1MB chunks of guest
	 * storage. The buffer will need to be aligned to
	 * conf_dump_storage_state_len so we don't end on a partial
	 * chunk.
	 */
	if (!buff_user_len ||
	    !IS_ALIGNED(buff_user_len, increment_len))
		goto out;

	/*
	 * Allocate a buffer from which we will later copy to the user
	 * process. We don't want userspace to dictate our buffer size
	 * so we limit it to DUMP_BUFF_LEN.
	 */
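	/*
	 * Worked example (illustrative value only; the real record size
	 * comes from the UV query): if conf_dump_storage_state_len were
	 * 64 bytes, each 1MB (HPAGE_SIZE) chunk of guest storage would
	 * produce one 64-byte record, so a 1MB (DUMP_BUFF_LEN) kernel
	 * buffer could hold the state of up to 16384 chunks, i.e. 16GB
	 * of guest storage, before a copy_to_user() flush is needed.
	 */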
	ret = -ENOMEM;
	buff_kvm_size = min_t(u64, buff_user_len, DUMP_BUFF_LEN);
	buff_kvm = vzalloc(buff_kvm_size);
	if (!buff_kvm)
		goto out;

	ret = 0;
	uvcb.dump_area_origin = (u64)buff_kvm;
	/* We will loop until the user buffer is filled or an error occurs */
	do {
		/* Get 1MB worth of guest storage state data */
		cc = uv_call_sched(0, (u64)&uvcb);

		/* All or nothing */
		if (cc) {
			ret = -EINVAL;
			break;
		}

		size_done += increment_len;
		uvcb.dump_area_origin += increment_len;
		buff_user_len -= increment_len;
		uvcb.gaddr += HPAGE_SIZE;

		/* KVM buffer full, time to copy to the process */
		if (!buff_user_len || size_done == DUMP_BUFF_LEN) {
			if (copy_to_user(buff_user, buff_kvm, size_done)) {
				ret = -EFAULT;
				break;
			}

			buff_user += size_done;
			size_done = 0;
			uvcb.dump_area_origin = (u64)buff_kvm;
		}
	} while (buff_user_len);

	/* Report back where we ended dumping */
	*gaddr = uvcb.gaddr;

	/* Let's only log errors, we don't want to spam */
out:
	if (ret)
		KVM_UV_EVENT(kvm, 3,
			     "PROTVIRT DUMP STORAGE STATE: addr %llx ret %d, uvcb rc %x rrc %x",
			     uvcb.gaddr, ret, uvcb.header.rc, uvcb.header.rrc);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	vfree(buff_kvm);

	return ret;
}
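/*
 * A minimal caller sketch (hypothetical sizes, assuming kvm->lock is held,
 * a dump has been initiated, and buff_user points to a suitably sized
 * userspace buffer): dumping the storage state for the first 1GB of guest
 * storage, i.e. 1024 records of conf_dump_storage_state_len bytes each:
 *
 *	u64 gaddr = 0;
 *	u64 len = 1024 * uv_info.conf_dump_storage_state_len;
 *	u16 rc, rrc;
 *	int ret = kvm_s390_pv_dump_stor_state(kvm, buff_user, &gaddr,
 *					      len, &rc, &rrc);
 *
 * On return, gaddr tells the caller where to resume a partial dump.
 */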
/**
 * kvm_s390_pv_dump_complete
 *
 * @kvm: pointer to the guest's KVM struct
 * @buff_user: Userspace pointer where we will write the results to
 * @rc: Pointer to where the uvcb return code is stored
 * @rrc: Pointer to where the uvcb return reason code is stored
 *
 * Completes the dumping operation and writes the completion data to
 * user space.
 *
 * Context: kvm->lock needs to be held
 *
 * Return:
 *  0 on success
 *  -ENOMEM if allocating the completion buffer fails
 *  -EINVAL if the UV call fails, rc and rrc will be set in this case
 *  -EFAULT if copying the result to buff_user failed
 */
int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
			      u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_complete complete = {
		.header.len = sizeof(complete),
		.header.cmd = UVC_CMD_DUMP_COMPLETE,
		.config_handle = kvm_s390_pv_get_handle(kvm),
	};
	u64 *compl_data;
	int ret;

	/* Allocate dump area */
	compl_data = vzalloc(uv_info.conf_dump_finalize_len);
	if (!compl_data)
		return -ENOMEM;
	complete.dump_area_origin = (u64)compl_data;

	ret = uv_call_sched(0, (u64)&complete);
	*rc = complete.header.rc;
	*rrc = complete.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP COMPLETE: rc %x rrc %x",
		     complete.header.rc, complete.header.rrc);

	if (!ret) {
		/*
		 * kvm_s390_pv_dealloc_vm() will also (mem)set
		 * this to false on a reboot or other destroy
		 * operation for this vm.
		 */
		kvm->arch.pv.dumping = false;
		kvm_s390_vcpu_unblock_all(kvm);
		ret = copy_to_user(buff_user, compl_data, uv_info.conf_dump_finalize_len);
		if (ret)
			ret = -EFAULT;
	}
	vfree(compl_data);
	/* If the UVC returned an error, translate it to -EINVAL */
	if (ret > 0)
		ret = -EINVAL;
	return ret;
}
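/*
 * Overall dump flow for reference (subcommand names as documented for
 * KVM_PV_DUMP under the KVM_S390_PV_COMMAND ioctl in
 * Documentation/virt/kvm/api.rst; treat this as a sketch, not a
 * specification):
 *
 *	KVM_PV_DUMP_INIT		prepare the UV for dumping
 *	KVM_PV_DUMP_CPU			per-vcpu state, kvm_s390_pv_dump_cpu()
 *	KVM_PV_DUMP_CONFIG_STOR_STATE	kvm_s390_pv_dump_stor_state()
 *	KVM_PV_DUMP_COMPLETE		kvm_s390_pv_dump_complete()
 */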