// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
pgprot_t pgprot_writecombine(pgprot_t prot)
{
	/*
	 * mio_wb_bit_mask may be set on a different CPU, but it is only set
	 * once at init and only read afterwards.
	 */
	return __pgprot(pgprot_val(prot) | mio_wb_bit_mask);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
pgprot_t pgprot_writethrough(pgprot_t prot)
{
	/*
	 * mio_wb_bit_mask may be set on a different CPU, but it is only set
	 * once at init and only read afterwards.
	 */
	return __pgprot(pgprot_val(prot) & ~mio_wb_bit_mask);
}
EXPORT_SYMBOL_GPL(pgprot_writethrough);
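
/*
 * Illustrative sketch, not part of the original file: how a driver's mmap
 * handler might apply the write-combine attribute on s390. The function
 * name is hypothetical; the io_remap_pfn_range() call is the usual way to
 * establish such a mapping, under the assumption that @pfn refers to MMIO
 * space.
 */
static int __maybe_unused example_mmap_wc(struct vm_area_struct *vma,
					  unsigned long pfn)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return io_remap_pfn_range(vma, vma->vm_start, pfn,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}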
static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, int nodat)
{
	unsigned long opt, asce;

	if (MACHINE_HAS_TLB_GUEST) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL || nodat)
			opt |= IPTE_NODAT;
		if (asce != -1UL) {
			asce = asce ? : mm->context.asce;
			opt |= IPTE_GUEST_ASCE;
		}
		__ptep_ipte(addr, ptep, opt, asce, IPTE_LOCAL);
	} else {
		__ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL);
	}
}
static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, int nodat)
{
	unsigned long opt, asce;

	if (MACHINE_HAS_TLB_GUEST) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL || nodat)
			opt |= IPTE_NODAT;
		if (asce != -1UL) {
			asce = asce ? : mm->context.asce;
			opt |= IPTE_GUEST_ASCE;
		}
		__ptep_ipte(addr, ptep, opt, asce, IPTE_GLOBAL);
	} else {
		__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	}
}
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep,
				      int nodat)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		ptep_ipte_local(mm, addr, ptep, nodat);
	else
		ptep_ipte_global(mm, addr, ptep, nodat);
	atomic_dec(&mm->context.flush_count);
	return old;
}
static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep,
				    int nodat)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_INVALID)));
		mm->context.flush_mm = 1;
	} else
		ptep_ipte_global(mm, addr, ptep, nodat);
	atomic_dec(&mm->context.flush_count);
	return old;
}
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long value = 0;
#ifdef CONFIG_PGSTE
	unsigned long *ptr = (unsigned long *)(ptep + PTRS_PER_PTE);

	do {
		value = __atomic64_or_barrier(PGSTE_PCL_BIT, (long *)ptr);
	} while (value & PGSTE_PCL_BIT);
	value |= PGSTE_PCL_BIT;
#endif
	return __pgste(value);
}
static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	barrier();
	WRITE_ONCE(*(unsigned long *)(ptep + PTRS_PER_PTE), pgste_val(pgste) & ~PGSTE_PCL_BIT);
#endif
}
static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}
static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}
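
/*
 * Illustrative sketch, not part of the original file: the canonical PGSTE
 * update sequence used throughout this file - take the PCL bit lock,
 * modify the PGSTE, then store it back with the lock bit cleared. The
 * helper name is hypothetical.
 */
static void __maybe_unused example_pgste_update(pte_t *ptep)
{
	pgste_t pgste;

	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;	/* request invalidation notification */
	pgste_set_unlock(ptep, pgste);
}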
static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID)
		return pgste;
	address = pte_val(pte) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}
static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}
static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			entry = set_pte_bit(entry, __pgprot(_PAGE_DIRTY));
			entry = clear_pte_bit(entry, __pgprot(_PAGE_PROTECT));
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	set_pte(ptep, entry);
	return pgste;
}
static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long bits;

	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
	if (bits) {
		pgste_val(pgste) ^= bits;
		ptep_notify(mm, addr, ptep, bits);
	}
#endif
	return pgste;
}
static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pgste_t pgste = __pgste(0);

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
	}
	return pgste;
}
static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
				     unsigned long addr, pte_t *ptep,
				     pgste_t pgste, pte_t old, pte_t new)
{
	if (mm_has_pgste(mm)) {
		if (pte_val(old) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, new, mm);
		if (pte_val(new) & _PAGE_INVALID) {
			pgste = pgste_update_all(old, pgste, mm);
			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
			    _PGSTE_GPS_USAGE_UNUSED)
				old = set_pte_bit(old, __pgprot(_PAGE_UNUSED));
		}
		pgste = pgste_set_pte(ptep, pgste, new);
		pgste_set_unlock(ptep, pgste);
	} else {
		set_pte(ptep, new);
	}
	return old;
}
pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;
	int nodat;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_direct(mm, addr, ptep, nodat);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);
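
/*
 * Illustrative sketch, not part of the original file: replacing a PTE with
 * the invalid PTE via ptep_xchg_direct(), using the returned old value to
 * inspect the previous mapping. The helper name is hypothetical; the
 * caller is assumed to hold the page table lock.
 */
static void __maybe_unused example_zap_pte(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = ptep_xchg_direct(mm, addr, ptep, __pte(_PAGE_INVALID));
	if (pte_val(old) & _PAGE_DIRTY)
		pr_debug("pte at %lx was dirty\n", addr);
}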
/*
 * Caller must check that new PTE only differs in _PAGE_PROTECT HW bit, so that
 * RDP can be used instead of IPTE. See also comments at pte_allow_rdp().
 */
void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
			 pte_t new)
{
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_rdp(addr, ptep, 0, 0, 1);
	else
		__ptep_rdp(addr, ptep, 0, 0, 0);
	/*
	 * PTE is not invalidated by RDP, only _PAGE_PROTECT is cleared. That
	 * means it is still valid and active, and must not be changed according
	 * to the architecture. But writing a new value that only differs in SW
	 * bits is allowed.
	 */
	set_pte(ptep, new);
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}
EXPORT_SYMBOL(ptep_reset_dat_prot);
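
/*
 * Illustrative sketch, not part of the original file: the caller-side
 * dispatch between RDP and IPTE, mirroring the pattern used by
 * ptep_set_access_flags() in asm/pgtable.h. RDP is only usable when the
 * facility is installed and the new PTE differs from the old one solely
 * in the _PAGE_PROTECT bit, which pte_allow_rdp() checks.
 */
static void __maybe_unused example_apply_prot(struct vm_area_struct *vma,
					      unsigned long addr,
					      pte_t *ptep, pte_t new)
{
	if (MACHINE_HAS_RDP && !mm_has_pgste(vma->vm_mm) &&
	    pte_allow_rdp(*ptep, new))
		ptep_reset_dat_prot(vma->vm_mm, addr, ptep, new);
	else
		ptep_xchg_direct(vma->vm_mm, addr, ptep, new);
}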
pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;
	int nodat;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_lazy(mm, addr, ptep, nodat);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);
pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	pgste_t pgste;
	pte_t old;
	int nodat;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_lazy(mm, addr, ptep, nodat);
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(old, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return old;
}
void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	pgste_t pgste;
	struct mm_struct *mm = vma->vm_mm;

	if (!MACHINE_HAS_NX)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_NOEXEC));
	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else {
		set_pte(ptep, pte);
	}
	preempt_enable();
}
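
/*
 * Illustrative sketch, not part of the original file: the transactional
 * start/commit pair as driven by generic change_protection(). The PTE is
 * logically invalid between the two calls, so the new value can be
 * computed free of races with the hardware-set dirty/referenced state.
 * The helper name is hypothetical.
 */
static void __maybe_unused example_wrprotect(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = ptep_modify_prot_start(vma, addr, ptep);
	ptep_modify_prot_commit(vma, addr, ptep, old, pte_wrprotect(old));
}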
static inline void pmdp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_LOCAL);
	else
		__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
	if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
		gmap_pmdp_idte_local(mm, addr);
}
static inline void pmdp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST) {
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_idte_global(mm, addr);
	} else if (MACHINE_HAS_IDTE) {
		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_idte_global(mm, addr);
	} else {
		__pmdp_csp(pmdp);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_csp(mm, addr);
	}
}
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pmdp_idte_local(mm, addr, pmdp);
	else
		pmdp_idte_global(mm, addr, pmdp);
	atomic_dec(&mm->context.flush_count);
	return old;
}
static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_INVALID)));
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm))
			gmap_pmdp_invalidate(mm, addr);
	} else {
		pmdp_idte_global(mm, addr, pmdp);
	}
	atomic_dec(&mm->context.flush_count);
	return old;
}
#ifdef CONFIG_PGSTE
static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	/* We need a valid VMA, otherwise this is clearly a fault. */
	vma = vma_lookup(mm, addr);
	if (!vma)
		return -EFAULT;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return -ENOENT;

	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return -ENOENT;

	pud = pud_offset(p4d, addr);
	if (!pud_present(*pud))
		return -ENOENT;

	/* Large PUDs are not supported yet. */
	if (pud_large(*pud))
		return -EFAULT;

	*pmdp = pmd_offset(pud, addr);
	return 0;
}
#endif
pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_direct(mm, addr, pmdp);
	set_pmd(pmdp, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);
pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_lazy(mm, addr, pmdp);
	set_pmd(pmdp, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);
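
/*
 * Illustrative sketch, not part of the original file: dropping a huge
 * segment mapping with pmdp_xchg_lazy(). The lazy variant may defer the
 * TLB flush to the next flush_tlb_mm() when only the current CPU is
 * attached to the mm. The helper name is hypothetical.
 */
static pmd_t __maybe_unused example_zap_pmd(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}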
static inline void pudp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pud_t *pudp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_LOCAL);
	else
		__pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL);
}
static inline void pudp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pud_t *pudp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
	else if (MACHINE_HAS_IDTE)
		__pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
	else
		/*
		 * Invalid bit position is the same for pmd and pud, so we can
		 * reuse _pmd_csp() here
		 */
		__pmdp_csp((pmd_t *) pudp);
}
static inline pud_t pudp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pud_t *pudp)
{
	pud_t old;

	old = *pudp;
	if (pud_val(old) & _REGION_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pudp_idte_local(mm, addr, pudp);
	else
		pudp_idte_global(mm, addr, pudp);
	atomic_dec(&mm->context.flush_count);
	return old;
}
pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pud_t *pudp, pud_t new)
{
	pud_t old;

	preempt_disable();
	old = pudp_flush_direct(mm, addr, pudp);
	set_pud(pudp, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pudp_xchg_direct);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	set_pte(ptep, __pte(_PAGE_INVALID));
	ptep++;
	set_pte(ptep, __pte(_PAGE_INVALID));
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#ifdef CONFIG_PGSTE
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;

	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
/**
 * ptep_force_prot - change access rights of a locked pte
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @ptep: pointer to the page table entry
 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bit: pgste bit to set (e.g. for notification)
 *
 * Returns 0 if the access rights were changed and -EAGAIN if the current
 * and requested access rights are incompatible.
 */
int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, int prot, unsigned long bit)
{
	pte_t entry;
	pgste_t pgste;
	int pte_i, pte_p, nodat;

	pgste = pgste_get_lock(ptep);
	entry = *ptep;
	/* Check pte entry after all locks have been acquired */
	pte_i = pte_val(entry) & _PAGE_INVALID;
	pte_p = pte_val(entry) & _PAGE_PROTECT;
	if ((pte_i && (prot != PROT_NONE)) ||
	    (pte_p && (prot & PROT_WRITE))) {
		pgste_set_unlock(ptep, pgste);
		return -EAGAIN;
	}
	/* Change access rights and set pgste bit */
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	if (prot == PROT_NONE && !pte_i) {
		ptep_flush_direct(mm, addr, ptep, nodat);
		pgste = pgste_update_all(entry, pgste, mm);
		entry = set_pte_bit(entry, __pgprot(_PAGE_INVALID));
	}
	if (prot == PROT_READ && !pte_p) {
		ptep_flush_direct(mm, addr, ptep, nodat);
		entry = clear_pte_bit(entry, __pgprot(_PAGE_INVALID));
		entry = set_pte_bit(entry, __pgprot(_PAGE_PROTECT));
	}
	pgste_val(pgste) |= bit;
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	return 0;
}
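
/*
 * Illustrative sketch, not part of the original file: write-protecting a
 * guest page and requesting an invalidation notification, the pattern the
 * gmap shadow code relies on. The helper name is hypothetical; the caller
 * is assumed to hold the page table lock for @ptep.
 */
static int __maybe_unused example_wp_for_notify(struct mm_struct *mm,
						unsigned long addr,
						pte_t *ptep)
{
	return ptep_force_prot(mm, addr, ptep, PROT_READ, PGSTE_IN_BIT);
}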
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte)
{
	pgste_t spgste, tpgste;
	pte_t spte, tpte;
	int rc = -EAGAIN;

	if (!(pte_val(*tptep) & _PAGE_INVALID))
		return 0;	/* already shadowed */
	spgste = pgste_get_lock(sptep);
	spte = *sptep;
	if (!(pte_val(spte) & _PAGE_INVALID) &&
	    !((pte_val(spte) & _PAGE_PROTECT) &&
	      !(pte_val(pte) & _PAGE_PROTECT))) {
		pgste_val(spgste) |= PGSTE_VSIE_BIT;
		tpgste = pgste_get_lock(tptep);
		tpte = __pte((pte_val(spte) & PAGE_MASK) |
			     (pte_val(pte) & _PAGE_PROTECT));
		/* don't touch the storage key - it belongs to parent pgste */
		tpgste = pgste_set_pte(tptep, tpgste, tpte);
		pgste_set_unlock(tptep, tpgste);
		rc = 1;
	}
	pgste_set_unlock(sptep, spgste);
	return rc;
}
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
{
	pgste_t pgste;
	int nodat;

	pgste = pgste_get_lock(ptep);
	/* notifier is called by the caller */
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	ptep_flush_direct(mm, saddr, ptep, nodat);
	/* don't touch the storage key - it belongs to parent pgste */
	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
	pgste_set_unlock(ptep, pgste);
}
static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = pfn_swap_entry_to_page(entry);

		dec_mm_counter(mm, mm_counter(page));
	}
	free_swap_and_cache(entry);
}
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset)
{
	unsigned long pgstev;
	pgste_t pgste;
	pte_t pte;

	/* Zap unused and logically-zero pages */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	pte = *ptep;
	if (!reset && pte_swap(pte) &&
	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
	     (pgstev & _PGSTE_GPS_ZERO))) {
		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	if (reset)
		pgste_val(pgste) &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long ptev;
	pgste_t pgste;

	/* Clear storage key ACC and F, but set R/C */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
/*
 * Test and reset if a guest page is dirty
 */
bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	bool dirty;
	int nodat;

	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
		nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
		ptep_ipte_global(mm, addr, ptep, nodat);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte = set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
		else
			pte = set_pte_bit(pte, __pgprot(_PAGE_INVALID));
		set_pte(ptep, pte);
	}
	pgste_set_unlock(ptep, pgste);
	return dirty;
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);
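
/*
 * Illustrative sketch, not part of the original file: testing and clearing
 * the user-dirty state of a single page, roughly what KVM does when it
 * harvests its dirty log. The helper name is hypothetical.
 */
static bool __maybe_unused example_test_dirty(struct mm_struct *mm,
					      unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *ptep;
	bool dirty;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return false;
	dirty = ptep_test_and_clear_uc(mm, addr, ptep);
	pte_unmap_unlock(ptep, ptl);
	return dirty;
}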
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq)
{
	unsigned long keyul, paddr;
	spinlock_t *ptl;
	pgste_t old, new;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If we don't have a PTE table and if there is no huge page mapped,
	 * we can ignore attempts to set the key to 0, because it already is 0.
	 */
	switch (pmd_lookup(mm, addr, &pmdp)) {
	case -ENOENT:
		return key ? -EFAULT : 0;
	case 0:
		break;
	default:
		return -EFAULT;
	}
again:
	ptl = pmd_lock(mm, pmdp);
	if (!pmd_present(*pmdp)) {
		spin_unlock(ptl);
		return key ? -EFAULT : 0;
	}

	if (pmd_large(*pmdp)) {
		paddr = pmd_val(*pmdp) & HPAGE_MASK;
		paddr |= addr & ~HPAGE_MASK;
		/*
		 * Huge pmds need quiescing operations, they are
		 * always mapped.
		 */
		page_set_storage_key(paddr, key, 1);
		spin_unlock(ptl);
		return 0;
	}
	spin_unlock(ptl);

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	if (!ptep)
		goto again;
	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	keyul = (unsigned long) key;
	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long bits, skey;

		paddr = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(paddr);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(paddr, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);
/**
 * Conditionally set a guest storage key (handling csske).
 * oldkey will be updated when either mr or mc is set and a pointer is given.
 *
 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
 * storage key was updated and -EFAULT on access errors.
 */
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc)
{
	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
	int rc;

	/* we can drop the pgste lock between getting and setting the key */
	if (mr | mc) {
		rc = get_guest_storage_key(current->mm, addr, &tmp);
		if (rc)
			return rc;
		if (oldkey)
			*oldkey = tmp;
		if (!mr)
			mask |= _PAGE_REFERENCED;
		if (!mc)
			mask |= _PAGE_CHANGED;
		if (!((tmp ^ key) & mask))
			return 0;
	}
	rc = set_guest_storage_key(current->mm, addr, key, nq);
	return rc < 0 ? rc : 1;
}
EXPORT_SYMBOL(cond_set_guest_storage_key);
/**
 * Reset a guest reference bit (rrbe), returning the reference and changed bit.
 *
 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 */
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	unsigned long paddr;
	pgste_t old, new;
	pmd_t *pmdp;
	pte_t *ptep;
	int cc = 0;

	/*
	 * If we don't have a PTE table and if there is no huge page mapped,
	 * the storage key is 0 and there is nothing for us to do.
	 */
	switch (pmd_lookup(mm, addr, &pmdp)) {
	case -ENOENT:
		return 0;
	case 0:
		break;
	default:
		return -EFAULT;
	}
again:
	ptl = pmd_lock(mm, pmdp);
	if (!pmd_present(*pmdp)) {
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_large(*pmdp)) {
		paddr = pmd_val(*pmdp) & HPAGE_MASK;
		paddr |= addr & ~HPAGE_MASK;
		cc = page_reset_referenced(paddr);
		spin_unlock(ptl);
		return cc;
	}
	spin_unlock(ptl);

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	if (!ptep)
		goto again;
	new = old = pgste_get_lock(ptep);
	/* Reset guest reference bit only */
	pgste_val(new) &= ~PGSTE_GR_BIT;

	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		paddr = pte_val(*ptep) & PAGE_MASK;
		cc = page_reset_referenced(paddr);
		/* Merge real referenced bit into host-set */
		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
	}
	/* Reflect guest's logical view, not physical */
	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
	/* Changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return cc;
}
EXPORT_SYMBOL(reset_guest_reference_bit);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key)
{
	unsigned long paddr;
	spinlock_t *ptl;
	pgste_t pgste;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If we don't have a PTE table and if there is no huge page mapped,
	 * the storage key is 0.
	 */
	*key = 0;

	switch (pmd_lookup(mm, addr, &pmdp)) {
	case -ENOENT:
		return 0;
	case 0:
		break;
	default:
		return -EFAULT;
	}
again:
	ptl = pmd_lock(mm, pmdp);
	if (!pmd_present(*pmdp)) {
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_large(*pmdp)) {
		paddr = pmd_val(*pmdp) & HPAGE_MASK;
		paddr |= addr & ~HPAGE_MASK;
		*key = page_get_storage_key(paddr);
		spin_unlock(ptl);
		return 0;
	}
	spin_unlock(ptl);

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	if (!ptep)
		goto again;
	pgste = pgste_get_lock(ptep);
	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	paddr = pte_val(*ptep) & PAGE_MASK;
	if (!(pte_val(*ptep) & _PAGE_INVALID))
		*key = page_get_storage_key(paddr);
	/* Reflect guest's logical view, not physical */
	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_guest_storage_key);
/**
 * pgste_perform_essa - perform ESSA actions on the PGSTE.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @orc: the specific action to perform, see the ESSA_SET_* macros.
 * @oldpte: the PTE will be saved there if the pointer is not NULL.
 * @oldpgste: the old PGSTE will be saved there if the pointer is not NULL.
 *
 * Return: 1 if the page is to be added to the CBRL, otherwise 0,
 *	   or < 0 in case of error. -EINVAL is returned for invalid values
 *	   of orc, -EFAULT for invalid addresses.
 */
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
			unsigned long *oldpte, unsigned long *oldpgste)
{
	struct vm_area_struct *vma;
	unsigned long pgstev;
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;
	int res = 0;

	WARN_ON_ONCE(orc > ESSA_MAX);
	if (unlikely(orc > ESSA_MAX))
		return -EINVAL;

	vma = vma_lookup(mm, hva);
	if (!vma || is_vm_hugetlb_page(vma))
		return -EFAULT;
	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	if (oldpte)
		*oldpte = pte_val(*ptep);
	if (oldpgste)
		*oldpgste = pgstev;

	switch (orc) {
	case ESSA_GET_STATE:
		break;
	case ESSA_SET_STABLE:
		pgstev &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		break;
	case ESSA_SET_UNUSED:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_UNUSED;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_POT_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev |= _PGSTE_GPS_USAGE_POT_VOLATILE;
			break;
		}
		if (pgstev & _PGSTE_GPS_ZERO) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			break;
		}
		if (!(pgstev & PGSTE_GC_BIT)) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			res = 1;
			break;
		}
		break;
	case ESSA_SET_STABLE_RESIDENT:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		/*
		 * Since the resident state can go away any time after this
		 * call, we will not make this page resident. We can revisit
		 * this decision if a guest will ever start using this.
		 */
		break;
	case ESSA_SET_STABLE_IF_RESIDENT:
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev &= ~_PGSTE_GPS_USAGE_MASK;
			pgstev |= _PGSTE_GPS_USAGE_STABLE;
		}
		break;
	case ESSA_SET_STABLE_NODAT:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE | _PGSTE_GPS_NODAT;
		break;
	default:
		/* we should never get here! */
		break;
	}
	/* If we are discarding a page, set it to logical zero */
	if (res)
		pgstev |= _PGSTE_GPS_ZERO;

	pgste_val(pgste) = pgstev;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return res;
}
EXPORT_SYMBOL(pgste_perform_essa);
/**
 * set_pgste_bits - set specific PGSTE bits.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @bits: a bitmask representing the bits that will be touched
 * @value: the values of the bits to be written. Only the bits in the mask
 *	   will be written.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
			unsigned long bits, unsigned long value)
{
	struct vm_area_struct *vma;
	spinlock_t *ptl;
	pgste_t new;
	pte_t *ptep;

	vma = vma_lookup(mm, hva);
	if (!vma || is_vm_hugetlb_page(vma))
		return -EFAULT;
	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	new = pgste_get_lock(ptep);

	pgste_val(new) &= ~bits;
	pgste_val(new) |= value & bits;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_pgste_bits);
/**
 * get_pgste - get the current PGSTE for the given address.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @pgstep: will be written with the current PGSTE for the given address.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
{
	struct vm_area_struct *vma;
	spinlock_t *ptl;
	pte_t *ptep;

	vma = vma_lookup(mm, hva);
	if (!vma || is_vm_hugetlb_page(vma))
		return -EFAULT;
	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	*pgstep = pgste_val(pgste_get(ptep));
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_pgste);
#endif
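
#ifdef CONFIG_PGSTE
/*
 * Illustrative sketch, not part of the original file: saving and restoring
 * the CMMA usage state of a page with get_pgste()/set_pgste_bits(), roughly
 * the primitive that the KVM CMMA migration interface builds on. The helper
 * name is hypothetical.
 */
static int __maybe_unused example_copy_usage_state(struct mm_struct *mm,
						   unsigned long hva)
{
	unsigned long pgstev;
	int rc;

	rc = get_pgste(mm, hva, &pgstev);
	if (rc < 0)
		return rc;
	/* Re-apply only the usage state bits. */
	return set_pgste_bits(mm, hva, _PGSTE_GPS_USAGE_MASK,
			      pgstev & _PGSTE_GPS_USAGE_MASK);
}
#endif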