/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <linux/mem_encrypt.h>

#include <asm/page_types.h>

#define FIRST_USER_ADDRESS	0UL
#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2 MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_SOFTW1	9	/* available for programmer */
#define _PAGE_BIT_SOFTW2	10	/* " */
#define _PAGE_BIT_SOFTW3	11	/* " */
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SOFTW4	58	/* available for programmer */
#define _PAGE_BIT_PKEY_BIT0	59	/* Protection Keys, bit 1/4 */
#define _PAGE_BIT_PKEY_BIT1	60	/* Protection Keys, bit 2/4 */
#define _PAGE_BIT_PKEY_BIT2	61	/* Protection Keys, bit 3/4 */
#define _PAGE_BIT_PKEY_BIT3	62	/* Protection Keys, bit 4/4 */
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

#define _PAGE_BIT_SPECIAL	_PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_SOFTW1
#define _PAGE_BIT_SOFT_DIRTY	_PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_DEVMAP	_PAGE_BIT_SOFTW4
/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE	_PAGE_BIT_GLOBAL
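/*
 * Illustrative sketch (not used by this header): because a PROT_NONE
 * mapping keeps _PAGE_BIT_GLOBAL set while _PAGE_BIT_PRESENT is clear,
 * a presence test along the lines of the kernel's pte_present() only
 * needs to check the two bits together:
 *
 *	static inline int pte_present_sketch(pte_t a)
 *	{
 *		return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
 *	}
 */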
#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
#define _PAGE_SOFTW3	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW3)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE	(_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
#define _PAGE_PKEY_BIT0	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT0)
#define _PAGE_PKEY_BIT1	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT1)
#define _PAGE_PKEY_BIT2	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT2)
#define _PAGE_PKEY_BIT3	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT3)
#else
#define _PAGE_PKEY_BIT0	(_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT1	(_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT2	(_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT3	(_AT(pteval_t, 0))
#endif
#define _PAGE_PKEY_MASK (_PAGE_PKEY_BIT0 | \
			 _PAGE_PKEY_BIT1 | \
			 _PAGE_PKEY_BIT2 | \
			 _PAGE_PKEY_BIT3)
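/*
 * Hedged sketch: since the four pkey bits are contiguous, the 4-bit
 * protection key can be recovered from a pte's flags by masking and
 * shifting down to bit 0 (roughly what the kernel's pte_flags_pkey()
 * does when pkeys are configured in):
 *
 *	static inline u16 pte_flags_pkey_sketch(unsigned long flags)
 *	{
 *		return (flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
 *	}
 */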
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_KNL_ERRATUM_MASK (_PAGE_DIRTY | _PAGE_ACCESSED)
#else
#define _PAGE_KNL_ERRATUM_MASK 0
#endif
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif
/*
 * Tracking the soft dirty bit when a page goes to swap is tricky.
 * We need a bit which can be stored in the pte _and_ does not conflict
 * with the swap entry format. On x86 bits 1-4 are *not* involved in
 * swap entry computation, but bit 7 is used for thp migration,
 * so we borrow bit 1 for soft dirty tracking.
 *
 * Please note that this bit must be treated as a swap dirty page
 * mark if and only if the PTE/PMD has the present bit clear!
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	_PAGE_RW
#else
#define _PAGE_SWP_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif
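/*
 * Worked sketch (assumes the usual __pte()/pte_val() accessors from
 * <asm/pgtable.h>): because _PAGE_SWP_SOFT_DIRTY aliases _PAGE_RW
 * (bit 1), marking a non-present swap pte soft-dirty is a plain bit-set:
 *
 *	static inline pte_t pte_swp_mksoft_dirty_sketch(pte_t pte)
 *	{
 *		return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
 *	}
 */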
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#define _PAGE_DEVMAP	(_AT(u64, 1) << _PAGE_BIT_DEVMAP)
#define __HAVE_ARCH_PTE_DEVMAP
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#define _PAGE_DEVMAP	(_AT(pteval_t, 0))
#endif

#define _PAGE_PROTNONE	(_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
#define _PAGE_TABLE_NOENC	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |\
				 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE_NOENC	(_PAGE_PRESENT | _PAGE_RW |		\
				 _PAGE_ACCESSED | _PAGE_DIRTY)
/*
 * Set of bits not changed in pte_modify.  The pte's
 * protection key is treated like _PAGE_RW, for
 * instance, and is *not* included in this mask since
 * pte_modify() does modify it.
 */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |	\
			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
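/*
 * Minimal sketch of how _PAGE_CHG_MASK is consumed (the real
 * pte_modify() lives in <asm/pgtable.h>): bits inside the mask are
 * preserved from the old pte, everything else comes from the new
 * protection:
 *
 *	pteval_t val = pte_val(pte);
 *	val &= _PAGE_CHG_MASK;
 *	val |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
 */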
/*
 * The cache modes defined here are used to translate between pure SW usage
 * and the HW defined cache mode bits and/or PAT entries.
 *
 * The resulting bits for PWT, PCD and PAT should be chosen in a way
 * to have the WB mode at index 0 (all bits clear). This is the default
 * right now and likely would break too much if changed.
 */
enum page_cache_mode {
	_PAGE_CACHE_MODE_WB       = 0,
	_PAGE_CACHE_MODE_WC       = 1,
	_PAGE_CACHE_MODE_UC_MINUS = 2,
	_PAGE_CACHE_MODE_UC       = 3,
	_PAGE_CACHE_MODE_WT       = 4,
	_PAGE_CACHE_MODE_WP       = 5,

	_PAGE_CACHE_MODE_NUM      = 8
};
#define _PAGE_CACHE_MASK	(_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)
#define _PAGE_LARGE_CACHE_MASK	(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT_LARGE)

#define _PAGE_NOCACHE		(cachemode2protval(_PAGE_CACHE_MODE_UC))
#define _PAGE_CACHE_WP		(cachemode2protval(_PAGE_CACHE_MODE_WP))
#define PAGE_NONE		__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED		__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_NOCACHE)
#define __PAGE_KERNEL_VVAR		(__PAGE_KERNEL_RO | _PAGE_USER)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
#define __PAGE_KERNEL_WP		(__PAGE_KERNEL | _PAGE_CACHE_WP)

#define __PAGE_KERNEL_IO		(__PAGE_KERNEL)
#define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE)
#ifndef __ASSEMBLY__

#define _PAGE_ENC	(_AT(pteval_t, sme_me_mask))
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY | _PAGE_ENC)
#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)

#define __PAGE_KERNEL_ENC	(__PAGE_KERNEL | _PAGE_ENC)
#define __PAGE_KERNEL_ENC_WP	(__PAGE_KERNEL_WP | _PAGE_ENC)

#define __PAGE_KERNEL_NOENC	(__PAGE_KERNEL)
#define __PAGE_KERNEL_NOENC_WP	(__PAGE_KERNEL_WP)

#define default_pgprot(x)	__pgprot((x) & __default_kernel_pte_mask)

#define PAGE_KERNEL		default_pgprot(__PAGE_KERNEL | _PAGE_ENC)
#define PAGE_KERNEL_NOENC	default_pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO		default_pgprot(__PAGE_KERNEL_RO | _PAGE_ENC)
#define PAGE_KERNEL_EXEC	default_pgprot(__PAGE_KERNEL_EXEC | _PAGE_ENC)
#define PAGE_KERNEL_EXEC_NOENC	default_pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX		default_pgprot(__PAGE_KERNEL_RX | _PAGE_ENC)
#define PAGE_KERNEL_NOCACHE	default_pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
#define PAGE_KERNEL_LARGE	default_pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
#define PAGE_KERNEL_LARGE_EXEC	default_pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC)
#define PAGE_KERNEL_VVAR	default_pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC)

#define PAGE_KERNEL_IO		default_pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE	default_pgprot(__PAGE_KERNEL_IO_NOCACHE)

#endif	/* __ASSEMBLY__ */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
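/*
 * Usage note (illustrative): these tables are indexed by the low
 * read/write/exec bits of a mapping's protection, __P* for private
 * (copy-on-write) mappings and __S* for shared ones. A PROT_READ |
 * PROT_EXEC private mapping, for example, selects __P101, i.e.
 * PAGE_READONLY_EXEC.
 */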
/*
 * Early identity mapping pte attribute macros.
 */
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC

#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x063		/* PRESENT+RW+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#ifdef CONFIG_X86_32
# include <asm/pgtable_32_types.h>
#else
# include <asm/pgtable_64_types.h>
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>
/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_PFN_MASK		((pteval_t)PHYSICAL_PAGE_MASK)

/*
 * Extracts the flags from a (pte|pmd|pud|pgd)val_t.
 * This includes the protection key value.
 */
#define PTE_FLAGS_MASK		(~PTE_PFN_MASK)
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

typedef struct { pgdval_t pgd; } pgd_t;
#ifdef CONFIG_X86_PAE

/*
 * PHYSICAL_PAGE_MASK might be non-constant when SME is compiled in, so we can't
 * use it here.
 */

#define PGD_PAE_PAGE_MASK	((signed long)PAGE_MASK)
#define PGD_PAE_PHYS_MASK	(((1ULL << __PHYSICAL_MASK_SHIFT)-1) & PGD_PAE_PAGE_MASK)

/*
 * PAE allows Base Address, P, PWT, PCD and AVL bits to be set in PGD entries.
 * All other bits are reserved and must be zero (MBZ).
 */
#define PGD_ALLOWED_BITS	(PGD_PAE_PHYS_MASK | _PAGE_PRESENT | \
				 _PAGE_PWT | _PAGE_PCD | \
				 _PAGE_SOFTW1 | _PAGE_SOFTW2 | _PAGE_SOFTW3)

#else
/* No need to mask any bits for !PAE */
#define PGD_ALLOWED_BITS	(~0ULL)
#endif
static inline pgd_t native_make_pgd(pgdval_t val)
{
	return (pgd_t) { val & PGD_ALLOWED_BITS };
}

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
	return pgd.pgd & PGD_ALLOWED_BITS;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}
#if CONFIG_PGTABLE_LEVELS > 4
typedef struct { p4dval_t p4d; } p4d_t;

static inline p4d_t native_make_p4d(pudval_t val)
{
	return (p4d_t) { val };
}

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
	return p4d.p4d;
}
#else
#include <asm-generic/pgtable-nop4d.h>

static inline p4d_t native_make_p4d(pudval_t val)
{
	return (p4d_t) { .pgd = native_make_pgd((pgdval_t)val) };
}

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
	return native_pgd_val(p4d.pgd);
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pmdval_t val)
{
	return (pud_t) { val };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return pud.pud;
}
#else
#include <asm-generic/pgtable-nopud.h>

static inline pud_t native_make_pud(pudval_t val)
{
	return (pud_t) { .p4d.pgd = native_make_pgd(val) };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return native_pgd_val(pud.p4d.pgd);
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { val };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}
#else
#include <asm-generic/pgtable-nopmd.h>

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return native_pgd_val(pmd.pud.p4d.pgd);
}
#endif
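/*
 * Hedged sketch: when upper levels are folded away, the types nest, so
 * a pmd built via the fallback path above stores its value in the pgd
 * at the core. Assuming a 2-level configuration (illustration only):
 *
 *	pmd_t pmd = native_make_pmd(0x1003);
 *	pmdval_t v = native_pmd_val(pmd);	// 0x1003 & PGD_ALLOWED_BITS
 */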
static inline p4dval_t p4d_pfn_mask(p4d_t p4d)
{
	/* No 512 GiB huge pages yet */
	return PTE_PFN_MASK;
}

static inline p4dval_t p4d_flags_mask(p4d_t p4d)
{
	return ~p4d_pfn_mask(p4d);
}

static inline p4dval_t p4d_flags(p4d_t p4d)
{
	return native_p4d_val(p4d) & p4d_flags_mask(p4d);
}
static inline pudval_t pud_pfn_mask(pud_t pud)
{
	if (native_pud_val(pud) & _PAGE_PSE)
		return PHYSICAL_PUD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}

static inline pudval_t pud_flags_mask(pud_t pud)
{
	return ~pud_pfn_mask(pud);
}

static inline pudval_t pud_flags(pud_t pud)
{
	return native_pud_val(pud) & pud_flags_mask(pud);
}
static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
{
	if (native_pmd_val(pmd) & _PAGE_PSE)
		return PHYSICAL_PMD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}

static inline pmdval_t pmd_flags_mask(pmd_t pmd)
{
	return ~pmd_pfn_mask(pmd);
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
	return native_pmd_val(pmd) & pmd_flags_mask(pmd);
}
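/*
 * Worked example (illustrative): for a 2MB mapping the PSE bit widens
 * the pfn mask to 2MB granularity, so the low address bits become flag
 * space. With a pmd value that has _PAGE_PSE set:
 *
 *	pmd_pfn_mask(pmd) == PHYSICAL_PMD_PAGE_MASK;	// bits 21 and up
 *	// hence _PAGE_PAT_LARGE (bit 12) shows up in pmd_flags(pmd)
 */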
static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { .pte = val };
}

static inline pteval_t native_pte_val(pte_t pte)
{
	return pte.pte;
}

static inline pteval_t pte_flags(pte_t pte)
{
	return native_pte_val(pte) & PTE_FLAGS_MASK;
}
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )

extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM];
extern uint8_t __pte2cachemode_tbl[8];
#define __pte2cm_idx(cb)				\
	((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) |		\
	 (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) |		\
	 (((cb) >> _PAGE_BIT_PWT) & 1))
#define __cm_idx2pte(i)					\
	((((i) & 4) << (_PAGE_BIT_PAT - 2)) |		\
	 (((i) & 2) << (_PAGE_BIT_PCD - 1)) |		\
	 (((i) & 1) << _PAGE_BIT_PWT))
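/*
 * Worked example: __pte2cm_idx() compresses the sparse PAT/PCD/PWT pte
 * bits (7, 4, 3) into a dense 3-bit table index, and __cm_idx2pte() is
 * its exact inverse. For a pte with only PCD set (the conventional
 * UC_MINUS encoding):
 *
 *	__pte2cm_idx(_PAGE_PCD) == 2
 *	__cm_idx2pte(2) == _PAGE_PCD
 */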
static inline unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	if (likely(pcm == 0))
		return 0;
	return __cachemode2pte_tbl[pcm];
}

static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
{
	return __pgprot(cachemode2protval(pcm));
}
static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
	unsigned long masked;

	masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
	if (likely(masked == 0))
		return 0;
	return __pte2cachemode_tbl[__pte2cm_idx(masked)];
}
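/*
 * Usage sketch (assumes the boot-time translation tables are already
 * set up): for modes the PAT configuration can express, the two
 * helpers round-trip:
 *
 *	pgprot_t wc = cachemode2pgprot(_PAGE_CACHE_MODE_WC);
 *	enum page_cache_mode m = pgprot2cachemode(wc);	// back to WC
 */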
static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
	pgprotval_t val = pgprot_val(pgprot);
	pgprot_t new;

	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
	return new;
}
static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
	pgprotval_t val = pgprot_val(pgprot);
	pgprot_t new;

	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT_LARGE) >>
		 (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
	return new;
}
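/*
 * Illustrative round trip: the two helpers above only relocate the PAT
 * bit between its 4K position (bit 7, which doubles as PSE on large
 * entries) and its large-page position (bit 12):
 *
 *	pgprot_t big = pgprot_4k_2_large(__pgprot(_PAGE_PAT | _PAGE_PCD));
 *	// pgprot_val(big) == (_PAGE_PAT_LARGE | _PAGE_PCD)
 *	// pgprot_large_2_4k(big) yields _PAGE_PAT | _PAGE_PCD again
 */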
typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern pteval_t __default_kernel_pte_mask;
extern void set_nx(void);
extern int nx_enabled;
#define pgprot_writecombine	pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough	pgprot_writethrough
extern pgprot_t pgprot_writethrough(pgprot_t prot);

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);
#ifdef CONFIG_X86_32
extern void native_pagetable_init(void);
#else
#define native_pagetable_init        paging_init
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_512G,
	PG_LEVEL_NUM
};
#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif
/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * in the same format as a regular pte.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
				    unsigned int *level);
extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
				   unsigned numpages, unsigned long page_flags);
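/*
 * Usage sketch for lookup_address() (illustrative; pte_present() comes
 * from <asm/pgtable.h>):
 *
 *	unsigned int level;
 *	pte_t *ptep = lookup_address(addr, &level);
 *
 *	if (ptep && pte_present(*ptep) && level == PG_LEVEL_4K)
 *		;	// a regular 4K mapping backs 'addr'
 */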
#endif	/* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_DEFS_H */