arch/arm64/kvm/hyp/nvhe/setup.c (Linux 6.7-rc7, linux-modified.git)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/early_alloc.h>
#include <nvhe/ffa.h>
#include <nvhe/fixed_config.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

unsigned long hyp_nr_cpus;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
                         (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;

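/*
 * Carve the host-donated memory pool into the contiguous regions needed
 * before the buddy allocator is up: the hyp vmemmap, the guest VM table,
 * the hyp stage-1 page-table, the host stage-2 page-table and the FF-A
 * proxy buffers. All allocations come from the early allocator.
 */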
static int divide_memory_pool(void *virt, unsigned long size)
{
        unsigned long nr_pages;

        hyp_early_alloc_init(virt, size);

        nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
        vmemmap_base = hyp_early_alloc_contig(nr_pages);
        if (!vmemmap_base)
                return -ENOMEM;

        nr_pages = hyp_vm_table_pages();
        vm_table_base = hyp_early_alloc_contig(nr_pages);
        if (!vm_table_base)
                return -ENOMEM;

        nr_pages = hyp_s1_pgtable_pages();
        hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
        if (!hyp_pgt_base)
                return -ENOMEM;

        nr_pages = host_s2_pgtable_pages();
        host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
        if (!host_s2_pgt_base)
                return -ENOMEM;

        nr_pages = hyp_ffa_proxy_pages();
        ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
        if (!ffa_proxy_pages)
                return -ENOMEM;

        return 0;
}

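/*
 * Rebuild the hypervisor stage-1 page-table with the early allocator,
 * mapping the idmap page, the vectors, the backing pages for the hyp
 * vmemmap, the hyp text/rodata/bss sections, the donated pool itself and
 * each CPU's per-CPU area and stack. The vgic global state is mapped RO
 * with a SHARED_OWNED state; the matching host stage-2 annotation is
 * applied later, once the vmemmap is usable.
 */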
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
                                 unsigned long *per_cpu_base,
                                 u32 hyp_va_bits)
{
        void *start, *end, *virt = hyp_phys_to_virt(phys);
        unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
        enum kvm_pgtable_prot prot;
        int ret, i;

        /* Recreate the hyp page-table using the early page allocator */
        hyp_early_alloc_init(hyp_pgt_base, pgt_size);
        ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
                                   &hyp_early_alloc_mm_ops);
        if (ret)
                return ret;

        ret = hyp_create_idmap(hyp_va_bits);
        if (ret)
                return ret;

        ret = hyp_map_vectors();
        if (ret)
                return ret;

        ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base));
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
        if (ret)
                return ret;

        for (i = 0; i < hyp_nr_cpus; i++) {
                struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);

                start = (void *)kern_hyp_va(per_cpu_base[i]);
                end = start + PAGE_ALIGN(hyp_percpu_size);
                ret = pkvm_create_mappings(start, end, PAGE_HYP);
                if (ret)
                        return ret;

                ret = pkvm_create_stack(params->stack_pa, &params->stack_hyp_va);
                if (ret)
                        return ret;
        }

        /*
         * Map the host sections RO in the hypervisor, but transfer the
         * ownership from the host to the hypervisor itself to make sure they
         * can't be donated or shared with another entity.
         *
         * The ownership transition requires matching changes in the host
         * stage-2. This will be done later (see fix_host_ownership()) once
         * the hyp_vmemmap is addressable.
         */
        prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
        ret = pkvm_create_mappings(&kvm_vgic_global_state,
                                   &kvm_vgic_global_state + 1, prot);
        if (ret)
                return ret;

        return 0;
}

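/*
 * Point each CPU's init parameters at the new stage-1 PGD and clean the
 * updated structures to the PoC, as they may be read with the MMU and
 * caches off when a CPU (re-)enters the hypervisor.
 */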
static void update_nvhe_init_params(void)
{
        struct kvm_nvhe_init_params *params;
        unsigned long i;

        for (i = 0; i < hyp_nr_cpus; i++) {
                params = per_cpu_ptr(&kvm_init_params, i);
                params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
                dcache_clean_inval_poc((unsigned long)params,
                                       (unsigned long)params + sizeof(*params));
        }
}

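/* Thin wrappers around the hyp buddy allocator, used as mm_ops callbacks. */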
static void *hyp_zalloc_hyp_page(void *arg)
{
        return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
        hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
        hyp_put_page(&hpool, addr);
}

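/*
 * Leaf walker for the hyp stage-1: read the pKVM page state encoded in
 * each valid mapping and install the matching annotation, or ownership
 * change, in the host stage-2 identity map.
 */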
static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
                                     enum kvm_pgtable_walk_flags visit)
{
        enum kvm_pgtable_prot prot;
        enum pkvm_page_state state;
        phys_addr_t phys;

        if (!kvm_pte_valid(ctx->old))
                return 0;

        if (ctx->level != (KVM_PGTABLE_MAX_LEVELS - 1))
                return -EINVAL;

        phys = kvm_pte_to_phys(ctx->old);
        if (!addr_is_memory(phys))
                return -EINVAL;

        /*
         * Adjust the host stage-2 mappings to match the ownership attributes
         * configured in the hypervisor stage-1.
         */
        state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
        switch (state) {
        case PKVM_PAGE_OWNED:
                return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
        case PKVM_PAGE_SHARED_OWNED:
                prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
                break;
        case PKVM_PAGE_SHARED_BORROWED:
                prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
                break;
        default:
                return -EINVAL;
        }

        return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
}

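/*
 * Walker taking a reference on every page-table page with a valid entry,
 * now that the hyp_vmemmap makes refcounting possible.
 */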
static int fix_hyp_pgtable_refcnt_walker(const struct kvm_pgtable_visit_ctx *ctx,
                                         enum kvm_pgtable_walk_flags visit)
{
        /*
         * Fix-up the refcount for the page-table pages as the early allocator
         * was unable to access the hyp_vmemmap and so the buddy allocator has
         * initialised the refcount to '1'.
         */
        if (kvm_pte_valid(ctx->old))
                ctx->mm_ops->get_page(ctx->ptep);

        return 0;
}

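/*
 * Walk the hyp stage-1 over every region of hyp_memory and propagate the
 * page-state annotations into the host stage-2 via the walker above.
 */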
static int fix_host_ownership(void)
{
        struct kvm_pgtable_walker walker = {
                .cb     = fix_host_ownership_walker,
                .flags  = KVM_PGTABLE_WALK_LEAF,
        };
        int i, ret;

        for (i = 0; i < hyp_memblock_nr; i++) {
                struct memblock_region *reg = &hyp_memory[i];
                u64 start = (u64)hyp_phys_to_virt(reg->base);

                ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
                if (ret)
                        return ret;
        }

        return 0;
}

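/*
 * Walk the entire hyp stage-1 address range to fix up the refcount of the
 * page-table pages that were handed out by the early allocator.
 */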
static int fix_hyp_pgtable_refcnt(void)
{
        struct kvm_pgtable_walker walker = {
                .cb     = fix_hyp_pgtable_refcnt_walker,
                .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
                .arg    = pkvm_pgtable.mm_ops,
        };

        return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits),
                                &walker);
}

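/*
 * Second stage of pKVM initialisation, reached with the new stage-1 live:
 * hand the page-table pool over to the buddy allocator, prepare the host
 * stage-2, switch pkvm_pgtable to buddy-backed mm_ops, fix up ownership
 * and refcounts, then set up the per-CPU fixmap, the FF-A proxy and the
 * VM table before returning to the host via __host_enter().
 */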
void __noreturn __pkvm_init_finalise(void)
{
        struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
        struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;
        unsigned long nr_pages, reserved_pages, pfn;
        int ret;

        /* Now that the vmemmap is backed, install the full-fledged allocator */
        pfn = hyp_virt_to_pfn(hyp_pgt_base);
        nr_pages = hyp_s1_pgtable_pages();
        reserved_pages = hyp_early_alloc_nr_used_pages();
        ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
        if (ret)
                goto out;

        ret = kvm_host_prepare_stage2(host_s2_pgt_base);
        if (ret)
                goto out;

        pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
                .zalloc_page = hyp_zalloc_hyp_page,
                .phys_to_virt = hyp_phys_to_virt,
                .virt_to_phys = hyp_virt_to_phys,
                .get_page = hpool_get_page,
                .put_page = hpool_put_page,
                .page_count = hyp_page_count,
        };
        pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

        ret = fix_host_ownership();
        if (ret)
                goto out;

        ret = fix_hyp_pgtable_refcnt();
        if (ret)
                goto out;

        ret = hyp_create_pcpu_fixmap();
        if (ret)
                goto out;

        ret = hyp_ffa_init(ffa_proxy_pages);
        if (ret)
                goto out;

        pkvm_hyp_vm_table_init(vm_table_base);
out:
        /*
         * We tail-called to here from handle___pkvm_init() and will not return,
         * so make sure to propagate the return value to the host.
         */
        cpu_reg(host_ctxt, 1) = ret;

        __host_enter(host_ctxt);
}

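/*
 * Entry point for pKVM initialisation: check alignment of the donated
 * pool at [phys, phys + size), divide it up, recreate the hyp mappings
 * and then branch to __pkvm_init_switch_pgd() via its physical (idmap)
 * address, which installs the new page-tables and tail-calls
 * __pkvm_init_finalise(). This function therefore never returns on
 * success.
 */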
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
                unsigned long *per_cpu_base, u32 hyp_va_bits)
{
        struct kvm_nvhe_init_params *params;
        void *virt = hyp_phys_to_virt(phys);
        void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
        int ret;

        BUG_ON(kvm_check_pvm_sysreg_table());

        if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
                return -EINVAL;

        hyp_spin_lock_init(&pkvm_pgd_lock);
        hyp_nr_cpus = nr_cpus;

        ret = divide_memory_pool(virt, size);
        if (ret)
                return ret;

        ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
        if (ret)
                return ret;

        update_nvhe_init_params();

        /* Jump into the idmap page to switch to the new page-tables */
        params = this_cpu_ptr(&kvm_init_params);
        fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
        fn(__hyp_pa(params), __pkvm_init_finalise);

        unreachable();
}