/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}
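
/*
 * Illustrative usage sketch, not part of this header's API: after mapping
 * one 1MB segment in the kernel direct mapping the accounting would be
 *
 *	update_page_count(PG_DIRECT_MAP_1M, 1);
 *
 * with a negative count when such a mapping is split or removed.
 */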

void arch_report_meminfo(struct seq_file *m);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
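
/*
 * Illustrative sketch, not part of this header's API: ZERO_PAGE() selects
 * among several physical zero pages, using the virtual address bits
 * covered by zero_page_mask as the cache color, e.g.
 *
 *	struct page *zp = ZERO_PAGE(vaddr);	// color picked by vaddr
 *
 * so zero-page reads at different colors do not compete for the same
 * cache lines.
 */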

/* TODO: s390 cannot support io_remap_pfn_range... */

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64 bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit pagetable entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has the following format:
 * |	P-table origin	      |	 TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 64 bit region table entry of S390 has the following format:
 * |	S-table origin	      |	 TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 *
 * The 64 bit regiontable origin of S390 has the following format:
 * |	   region table origin	  |	  DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit */
#define _PAGE_PROTECT	0x200		/* HW read-only bit */
#define _PAGE_INVALID	0x400		/* HW invalid bit */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
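
/*
 * Worked example against the table above (illustrative only, value
 * hypothetical): for pte_val(pte) == 0x215 the software bits decode as
 * _PAGE_PRESENT (0x001) | _PAGE_YOUNG (0x004) | _PAGE_READ (0x010) and the
 * hardware bits as _PAGE_PROTECT (0x200), i.e. the "read-only, clean,
 * young" row .01.xx0101.1. The three predicates then evaluate to
 *
 *	pte_none(pte)    == 0	// pte != 0x400
 *	pte_swap(pte)    == 0	// (0x215 & 0x201) == 0x201, not 0x200
 *	pte_present(pte) == 1	// (0x215 & 0x001) == 0x001
 */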

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* region/segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100	 /* private space control */
#define _ASCE_ALT_EVENT		0x80	 /* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	 /* space switch event */
#define _ASCE_REAL_SPACE	0x20	 /* real space control */
#define _ASCE_TYPE_MASK		0x0c	 /* asce table type mask */
#define _ASCE_TYPE_REGION1	0x0c	 /* region first table type */
#define _ASCE_TYPE_REGION2	0x08	 /* region second table type */
#define _ASCE_TYPE_REGION3	0x04	 /* region third table type */
#define _ASCE_TYPE_SEGMENT	0x00	 /* segment table type */
#define _ASCE_TABLE_LENGTH	0x03	 /* region table length */
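
/*
 * Illustrative sketch, not part of this header's API: an ASCE combines a
 * 4KB-aligned table origin with designation-type and length bits, e.g. an
 * ASCE for a region-third table could be built as (hypothetical)
 *
 *	unsigned long asce = __pa(table) | _ASCE_TYPE_REGION3 |
 *			     _ASCE_TABLE_LENGTH;
 */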

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin */
#define _REGION_ENTRY_PROTECT	0x200	 /* region protection bit */
#define _REGION_ENTRY_NOEXEC	0x100	 /* region no-execute bit */
#define _REGION_ENTRY_OFFSET	0xc0	 /* region table offset */
#define _REGION_ENTRY_INVALID	0x20	 /* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK	0x0c	 /* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1	0x0c	 /* region first table type */
#define _REGION_ENTRY_TYPE_R2	0x08	 /* region second table type */
#define _REGION_ENTRY_TYPE_R3	0x04	 /* region third table type */
#define _REGION_ENTRY_LENGTH	0x03	 /* region third length */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE		0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE	~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN		~0x7ffUL   /* page table origin */
#define _SEGMENT_ENTRY_PROTECT		0x200	   /* segment protection bit */
#define _SEGMENT_ENTRY_NOEXEC		0x100	   /* segment no-execute bit */
#define _SEGMENT_ENTRY_INVALID		0x20	   /* invalid segment table entry */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries */

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)
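
/*
 * In other words (illustrative arithmetic): a combined region/segment
 * (crst) table holds 2048 eight-byte entries and thus spans 16KB, while a
 * page table holds 256 entries in 2KB.
 */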

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES
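
/*
 * Illustrative sketch, not part of this header's API: with all five levels
 * in use a virtual address decomposes as
 *
 *	pgd_index(addr) = (addr >> 53) & 0x7ff;	// region-first index
 *	p4d_index(addr) = (addr >> 42) & 0x7ff;	// region-second index
 *	pud_index(addr) = (addr >> 31) & 0x7ff;	// region-third index
 *	pmd_index(addr) = (addr >> 20) & 0x7ff;	// segment index
 *	pte_index(addr) = (addr >> 12) & 0xff;	// page index
 */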

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX
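
/*
 * Illustrative consequence, not an additional rule: a private
 * PROT_READ|PROT_WRITE mapping starts out with __P011 == PAGE_RO, so the
 * first write traps and the fault handler can perform copy-on-write; a
 * shared mapping uses __S011 == PAGE_RW and writes proceed directly,
 * subject to the dirty/young software bit handling described above.
 */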

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)

/*
 * Region3 entry (large page) protection definitions.
 */
#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE | \
				 _REGION3_ENTRY_READ | \
				 _REGION3_ENTRY_WRITE | \
				 _REGION3_ENTRY_YOUNG | \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE | \
				   _REGION3_ENTRY_READ | \
				   _REGION3_ENTRY_YOUNG | \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return pmd_bad(__pmd(pud_val(pud)));
	if (pud_large(pud))
		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return pud_bad(__pud(p4d_val(p4d)));
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;

	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;

	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}
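
/*
 * Illustrative sketch, not part of the kernel API: a hypothetical helper
 * showing the intended use of pte_modify() for an mprotect-style downgrade
 * to read-only. The helper name is made up for this example.
 */
static inline pte_t example_pte_to_readonly(pte_t pte)
{
	/* keeps only the bits in _PAGE_CHG_MASK, then applies PAGE_RO */
	return pte_modify(pte, PAGE_RO);
}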

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static inline void __ptep_ipte_range(unsigned long address, int nr,
				     pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
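
/*
 * The common-code sequence from above thus looks like this on s390
 * (illustrative, with hypothetical variables):
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);  // pte gone + TLB flushed
 *	set_pte_at(mm, addr, ptep, newpte);        // install replacement
 *	flush_tlb_range(vma, start, end);          // nop on s390
 */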

pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;

		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_NX)
		pte_val(entry) &= ~_PAGE_NOEXEC;
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	pte_val(__pte) = physpage + pgprot_val(pgprot);
	if (!MACHINE_HAS_NX)
		pte_val(__pte) &= ~_PAGE_NOEXEC;
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}
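
/*
 * Illustrative sketch, not part of the kernel API: a hypothetical helper
 * combining mk_pte() and set_pte_at() to install a writable, executable-
 * excluded mapping for a page. Real fault handlers do this under the page
 * table lock.
 */
static inline void example_install_page(struct mm_struct *mm,
					unsigned long addr, pte_t *ptep,
					struct page *page)
{
	set_pte_at(mm, addr, ptep, mk_pte(page, PAGE_SHARED));
}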

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(p4d) (p4d_val(p4d) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	p4d_t *p4d = (p4d_t *) pgd;

	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		p4d = (p4d_t *) pgd_deref(*pgd);
	return p4d + p4d_index(address);
}

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	pud_t *pud = (pud_t *) p4d;

	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) p4d_deref(*p4d);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;

	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#define pfn_pte(pfn, pgprot)	mk_pte_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x)	(pte_val(x) >> PAGE_SHIFT)
#define pte_page(x)	pfn_to_page(pte_pfn(x))

#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
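
/*
 * Illustrative sketch, not part of the kernel API: a hypothetical helper
 * walking all five levels down to the pte of a kernel address. It assumes
 * the address is mapped with 4KB pages, i.e. no large segment or region
 * entries on the way down.
 */
static inline pte_t *example_pte_lookup_kernel(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	p4d_t *p4d = p4d_offset(pgd, address);
	pud_t *pud = pud_offset(p4d, address);
	pmd_t *pmd = pmd_offset(pud, address);

	return pte_offset_kernel(pmd, address);
}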

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
		return pud;
	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
		pud_val(pud) |= _REGION_ENTRY_PROTECT;
	}
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
				_REGION3_ENTRY_SOFT_DIRTY;
		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	}
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;

	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static inline void __pudp_idte(unsigned long addr, pud_t *pudp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;

		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bit 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
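
/*
 * Illustrative sketch, not part of the kernel API: a hypothetical
 * round-trip through the swap entry encoding above. Type and offset are
 * sample values.
 */
static inline int example_swap_roundtrip(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);
	pte_t pte = __swp_entry_to_pte(entry);

	/* a swap pte satisfies (pte & 0x201) == 0x200 and decodes back */
	return pte_swap(pte) &&
		__swp_type(__pte_to_swp_entry(pte)) == 3 &&
		__swp_offset(__pte_to_swp_entry(pte)) == 0x1234;
}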

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */