// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__
#include <linux/memblock.h>
#include <asm/kvm_pgtable.h>

/* Maximum number of memblock regions the hypervisor will track. */
#define HYP_MEMBLOCK_REGIONS 128

extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
17 static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
19 unsigned long total = 0, i;
21 /* Provision the worst case scenario */
22 for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
23 nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
30 static inline unsigned long __hyp_pgtable_total_pages(void)
32 unsigned long res = 0, i;
34 /* Cover all of memory with page-granularity */
35 for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
36 struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
37 res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
43 static inline unsigned long hyp_s1_pgtable_pages(void)
47 res = __hyp_pgtable_total_pages();
49 /* Allow 1 GiB for private mappings */
50 res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
55 static inline unsigned long host_s2_pgtable_pages(void)
60 * Include an extra 16 pages to safely upper-bound the worst case of
63 res = __hyp_pgtable_total_pages() + 16;
65 /* Allow 1 GiB for MMIO mappings */
66 res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
#endif /* __ARM64_KVM_PKVM_H__ */