// SPDX-License-Identifier: GPL-2.0-only
/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *      Gleb Natapov <gleb@redhat.com>
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include "async_pf.h"
#include <trace/events/kvm.h>

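/*
 * Async page fault support: instead of blocking a vCPU thread while the
 * host faults a page in, queue the fault here, let a workqueue item fault
 * the page in, and notify the guest through the arch "page not present"
 * and "page ready" hooks so it can reschedule the waiting task.
 *
 * Illustrative call sequence (simplified sketch; the exact call sites
 * live in kvm_main.c and in arch code):
 *
 *      kvm_async_pf_init();                              // module init
 *      kvm_async_pf_vcpu_init(vcpu);                     // vCPU creation
 *      kvm_setup_async_pf(vcpu, cr2_or_gpa, hva, &arch); // on a host-side fault
 *      kvm_check_async_pf_completion(vcpu);              // vCPU run loop
 *      kvm_clear_async_pf_completion_queue(vcpu);        // vCPU teardown
 *      kvm_async_pf_deinit();                            // module exit
 */
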
static struct kmem_cache *async_pf_cache;

int kvm_async_pf_init(void)
{
        async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

        if (!async_pf_cache)
                return -ENOMEM;

        return 0;
}

void kvm_async_pf_deinit(void)
{
        kmem_cache_destroy(async_pf_cache);
        async_pf_cache = NULL;
}

void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
        INIT_LIST_HEAD(&vcpu->async_pf.done);
        INIT_LIST_HEAD(&vcpu->async_pf.queue);
        spin_lock_init(&vcpu->async_pf.lock);
}

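/*
 * Workqueue callback for each queued async #PF: fault the page in on
 * behalf of the (possibly remote) task owning @mm, move the item onto
 * the vCPU's done list and wake the vCPU so the completion can be
 * delivered to the guest.
 */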
static void async_pf_execute(struct work_struct *work)
{
        struct kvm_async_pf *apf =
                container_of(work, struct kvm_async_pf, work);
        struct mm_struct *mm = apf->mm;
        struct kvm_vcpu *vcpu = apf->vcpu;
        unsigned long addr = apf->addr;
        gpa_t cr2_or_gpa = apf->cr2_or_gpa;
        int locked = 1;
        bool first;

        might_sleep();

        /*
         * This work is run asynchronously to the task which owns the mm
         * and may execute in a completely different context, so the page
         * must be faulted in through the remote-access GUP API.
         */
        mmap_read_lock(mm);
        get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, NULL,
                        &locked);
        if (locked)
                mmap_read_unlock(mm);

        if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
                kvm_arch_async_page_present(vcpu, apf);

        spin_lock(&vcpu->async_pf.lock);
        first = list_empty(&vcpu->async_pf.done);
        list_add_tail(&apf->link, &vcpu->async_pf.done);
        apf->vcpu = NULL;
        spin_unlock(&vcpu->async_pf.lock);

        if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
                kvm_arch_async_page_present_queued(vcpu);

        /*
         * apf may be freed by kvm_check_async_pf_completion() after
         * this point
         */

        trace_kvm_async_pf_completed(addr, cr2_or_gpa);

        __kvm_vcpu_wake_up(vcpu);

        mmput(mm);
}

static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
{
        /*
         * The async #PF is "done", but KVM must wait for the work item itself,
         * i.e. async_pf_execute(), to run to completion.  If KVM is a module,
         * KVM must ensure *no* code owned by KVM (the module) can be run
         * after the last call to module_put().  Note, flushing the work item
         * is always required when the item is taken off the completion queue.
         * E.g. even if the vCPU handles the item in the "normal" path, the VM
         * could be terminated before async_pf_execute() completes.
         *
         * Wake-all events skip the queue and go straight to the done list,
         * i.e. don't need to be flushed (but sanity check that the work
         * wasn't queued).
         */
        if (work->wakeup_all)
                WARN_ON_ONCE(work->work.func);
        else
                flush_work(&work->work);
        kmem_cache_free(async_pf_cache, work);
}

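/*
 * Tear down all outstanding async #PF work for @vcpu: cancel (or, with
 * CONFIG_KVM_ASYNC_PF_SYNC, flush) items still on the work queue, then
 * drain and free everything on the done list.  Presumably used on vCPU
 * destruction and when async #PF is disabled for the vCPU; nothing is
 * delivered to the guest from here.
 */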
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
        spin_lock(&vcpu->async_pf.lock);

        /* cancel all outstanding items still on the work queue */
        while (!list_empty(&vcpu->async_pf.queue)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.queue,
                                         typeof(*work), queue);
                list_del(&work->queue);

                /*
                 * The worker has already completed this item and moved it
                 * to vcpu->async_pf.done (clearing work->vcpu in the
                 * process); the loop below will free it.
                 */
                if (!work->vcpu)
                        continue;

                spin_unlock(&vcpu->async_pf.lock);
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
                flush_work(&work->work);
#else
                if (cancel_work_sync(&work->work)) {
                        mmput(work->mm);
                        kmem_cache_free(async_pf_cache, work);
                }
#endif
                spin_lock(&vcpu->async_pf.lock);
        }

        while (!list_empty(&vcpu->async_pf.done)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.done,
                                         typeof(*work), link);
                list_del(&work->link);

                spin_unlock(&vcpu->async_pf.lock);
                kvm_flush_and_free_async_pf_work(work);
                spin_lock(&vcpu->async_pf.lock);
        }
        spin_unlock(&vcpu->async_pf.lock);

        vcpu->async_pf.queued = 0;
}

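/*
 * Deliver completed async #PFs to the guest.  Intended to be called from
 * the vCPU run loop (in arch code): for every item on the done list that
 * the arch allows to be dequeued right now, fire the "page ready"/"page
 * present" hooks, unlink the item from the queue and flush+free it.
 */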
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        while (!list_empty_careful(&vcpu->async_pf.done) &&
              kvm_arch_can_dequeue_async_page_present(vcpu)) {
                spin_lock(&vcpu->async_pf.lock);
                work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
                                              link);
                list_del(&work->link);
                spin_unlock(&vcpu->async_pf.lock);

                kvm_arch_async_page_ready(vcpu, work);
                if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
                        kvm_arch_async_page_present(vcpu, work);

                list_del(&work->queue);
                vcpu->async_pf.queued--;
                kvm_flush_and_free_async_pf_work(work);
        }
}

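/*
 * Illustrative, simplified caller sketch, loosely modeled on the x86
 * fault path; kvm_can_do_async_pf(), RET_PF_RETRY and arch_data stand in
 * for whatever the architecture actually uses:
 *
 *      if (kvm_can_do_async_pf(vcpu) &&
 *          kvm_setup_async_pf(vcpu, cr2_or_gpa, hva, &arch_data))
 *              return RET_PF_RETRY;    // guest saw "not present", vCPU keeps running
 *      // otherwise fault the page in synchronously
 */
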
/*
 * Try to schedule a job to handle the page fault asynchronously. Returns
 * 'true' on success, 'false' on failure (the page fault then has to be
 * handled synchronously).
 */
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                        unsigned long hva, struct kvm_arch_async_pf *arch)
{
        struct kvm_async_pf *work;

        if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
                return false;

        /* Arch-specific code should not do async PF in this case. */
        if (unlikely(kvm_is_error_hva(hva)))
                return false;

        /*
         * Allocate without sleeping: if the allocation would have to
         * sleep, the vCPU might as well sleep faulting the page in
         * synchronously instead.
         */
        work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
        if (!work)
                return false;

        work->wakeup_all = false;
        work->vcpu = vcpu;
        work->cr2_or_gpa = cr2_or_gpa;
        work->addr = hva;
        work->arch = *arch;
        work->mm = current->mm;
        mmget(work->mm);

        INIT_WORK(&work->work, async_pf_execute);

        list_add_tail(&work->queue, &vcpu->async_pf.queue);
        vcpu->async_pf.queued++;
        work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);

        schedule_work(&work->work);

        return true;
}

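/*
 * Inject a special "wake all" event so the guest wakes every task that is
 * still waiting on an async #PF, e.g. (presumably) when async #PF is torn
 * down and the individual completions will never arrive.  The item goes
 * straight onto the done list and never through the workqueue, hence the
 * bare INIT_LIST_HEAD() and the wakeup_all special case in
 * kvm_flush_and_free_async_pf_work().
 */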
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;
        bool first;

        if (!list_empty_careful(&vcpu->async_pf.done))
                return 0;

        work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
        if (!work)
                return -ENOMEM;

        work->wakeup_all = true;
        INIT_LIST_HEAD(&work->queue); /* for list_del to work */

        spin_lock(&vcpu->async_pf.lock);
        first = list_empty(&vcpu->async_pf.done);
        list_add_tail(&work->link, &vcpu->async_pf.done);
        spin_unlock(&vcpu->async_pf.lock);

        if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
                kvm_arch_async_page_present_queued(vcpu);

        vcpu->async_pf.queued++;
        return 0;
}