/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

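/*
 * With CONFIG_PGSTE each 2K page table is followed by a second 2K block
 * of page status table entries (PGSTEs), so the PGSTE of a pte lives at
 * ptep[PTRS_PER_PTE]; all pgste_* helpers below rely on this layout.
 * A PGSTE carries the guest view of the storage key together with lock,
 * usage and notification bits used by KVM.
 */
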
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte(addr, ptep, IPTE_LOCAL);
	else
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

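/*
 * ptep_flush_direct() issues the IPTE right away; ptep_flush_lazy() only
 * marks the pte invalid and sets mm->context.flush_mm when this CPU is
 * the sole user of the mm, leaving the actual TLB flush to a later
 * flush_tlb_mm() pass. Paths that must not observe stale TLB entries use
 * the direct variant.
 */
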
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID)
		return pgste;
	address = pte_val(pte) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

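/*
 * The architected storage key byte is ACC (4 bits), F (fetch protection),
 * R (referenced) and C (changed). The shifts above line these up with the
 * PGSTE: R/C move into the guest-referenced/guest-changed bits (<< 48),
 * ACC/F into the key copy in the top byte (<< 56).
 */
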
static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	*ptep = entry;
	return pgste;
}

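/*
 * This is where user-dirty tracking starts: any valid, writable and
 * unprotected pte installed through this helper gets PGSTE_UC_BIT set,
 * which test_and_clear_guest_dirty() later reads and clears. On machines
 * without ESOP write protection cannot be used for dirty tracking, so
 * the dirty bit is simply forced on for writable ptes there.
 */
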
static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long bits;

	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
	if (bits) {
		pgste_val(pgste) ^= bits;
		ptep_notify(mm, addr, ptep, bits);
	}
#endif
	return pgste;
}

static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pgste_t pgste = __pgste(0);

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
	}
	return pgste;
}

static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
				     unsigned long addr, pte_t *ptep,
				     pgste_t pgste, pte_t old, pte_t new)
{
	if (mm_has_pgste(mm)) {
		if (pte_val(old) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, new, mm);
		if (pte_val(new) & _PAGE_INVALID) {
			pgste = pgste_update_all(old, pgste, mm);
			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
			    _PGSTE_GPS_USAGE_UNUSED)
				pte_val(old) |= _PAGE_UNUSED;
		}
		pgste = pgste_set_pte(ptep, pgste, new);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = new;
	}
	return old;
}

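/*
 * The exported pte exchange helpers below share a three-step pattern:
 * ptep_xchg_start() takes the PGSTE lock and delivers pending
 * notifications, one of the flush variants invalidates the old pte, and
 * ptep_xchg_commit() transfers storage-key state and installs the new
 * pte under the same lock.
 */
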
pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_direct(mm, addr, ptep);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);

pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);

pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(old, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return old;
}
EXPORT_SYMBOL(ptep_modify_prot_start);

void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = pte;
	}
	preempt_enable();
}
EXPORT_SYMBOL(ptep_modify_prot_commit);

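/*
 * ptep_modify_prot_start() and ptep_modify_prot_commit() form a split
 * transaction: start returns with preemption disabled and the PGSTE
 * still locked (parked via pgste_set()), commit picks it up again with
 * pgste_get(), installs the new pte and re-enables preemption. Nothing
 * that might sleep may run in between.
 */
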
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	if (!MACHINE_HAS_IDTE) {
		__pmdp_csp(pmdp);
		return old;
	}
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pmdp_idte(addr, pmdp, IDTE_LOCAL);
	else
		__pmdp_idte(addr, pmdp, IDTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else if (MACHINE_HAS_IDTE)
		__pmdp_idte(addr, pmdp, IDTE_GLOBAL);
	else
		__pmdp_csp(pmdp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

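/*
 * On machines without IDTE the pmd/pud flush falls back to __pmdp_csp(),
 * a compare-and-swap-and-purge that marks the entry invalid and purges
 * the TLBs of all CPUs in one serializing operation.
 */
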
pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_direct(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);

pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_lazy(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);

static inline pud_t pudp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pud_t *pudp)
{
	pud_t old;

	old = *pudp;
	if (pud_val(old) & _REGION_ENTRY_INVALID)
		return old;
	if (!MACHINE_HAS_IDTE) {
		/*
		 * Invalid bit position is the same for pmd and pud, so we can
		 * re-use __pmdp_csp() here
		 */
		__pmdp_csp((pmd_t *) pudp);
		return old;
	}
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pudp_idte(addr, pudp, IDTE_LOCAL);
	else
		__pudp_idte(addr, pudp, IDTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pud_t *pudp, pud_t new)
{
	pud_t old;

	preempt_disable();
	old = pudp_flush_direct(mm, addr, pudp);
	*pudp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pudp_xchg_direct);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

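/*
 * The deposited page table itself serves as the list node: its first two
 * words, i.e. its first two pte slots, hold the list_head. Withdraw
 * resets exactly those two slots to _PAGE_INVALID so the page is again a
 * fully invalid page table.
 */
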
#ifdef CONFIG_PGSTE
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;

	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/**
 * ptep_force_prot - change access rights of a locked pte
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @ptep: pointer to the page table entry
 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bit: pgste bit to set (e.g. for notification)
 *
 * Returns 0 if the access rights were changed and -EAGAIN if the current
 * and requested access rights are incompatible.
 */
int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, int prot, unsigned long bit)
{
	pte_t entry;
	pgste_t pgste;
	int pte_i, pte_p;

	pgste = pgste_get_lock(ptep);
	entry = *ptep;
	/* Check pte entry after all locks have been acquired */
	pte_i = pte_val(entry) & _PAGE_INVALID;
	pte_p = pte_val(entry) & _PAGE_PROTECT;
	if ((pte_i && (prot != PROT_NONE)) ||
	    (pte_p && (prot & PROT_WRITE))) {
		pgste_set_unlock(ptep, pgste);
		return -EAGAIN;
	}
	/* Change access rights and set pgste bit */
	if (prot == PROT_NONE && !pte_i) {
		ptep_flush_direct(mm, addr, ptep);
		pgste = pgste_update_all(entry, pgste, mm);
		pte_val(entry) |= _PAGE_INVALID;
	}
	if (prot == PROT_READ && !pte_p) {
		ptep_flush_direct(mm, addr, ptep);
		pte_val(entry) &= ~_PAGE_INVALID;
		pte_val(entry) |= _PAGE_PROTECT;
	}
	pgste_val(pgste) |= bit;
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	return 0;
}

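/*
 * -EAGAIN means the pte currently grants less access than the requested
 * protection assumes (invalid, or write-protected when PROT_WRITE is
 * wanted); callers are expected to fault the page in with the required
 * access and retry.
 */
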
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte)
{
	pgste_t spgste, tpgste;
	pte_t spte, tpte;
	int rc = -EAGAIN;

	if (!(pte_val(*tptep) & _PAGE_INVALID))
		return 0;	/* already shadowed */
	spgste = pgste_get_lock(sptep);
	spte = *sptep;
	if (!(pte_val(spte) & _PAGE_INVALID) &&
	    !((pte_val(spte) & _PAGE_PROTECT) &&
	      !(pte_val(pte) & _PAGE_PROTECT))) {
		pgste_val(spgste) |= PGSTE_VSIE_BIT;
		tpgste = pgste_get_lock(tptep);
		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
				(pte_val(pte) & _PAGE_PROTECT);
		/* don't touch the storage key - it belongs to parent pgste */
		tpgste = pgste_set_pte(tptep, tpgste, tpte);
		pgste_set_unlock(tptep, tpgste);
		rc = 1;
	}
	pgste_set_unlock(sptep, spgste);
	return rc;
}

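/*
 * VSIE shadowing: the target pte inherits the page frame of the parent
 * (source) pte and the stricter of the two protection bits. Setting
 * PGSTE_VSIE_BIT on the source makes pgste_pte_notify() tear the shadow
 * down again as soon as the parent mapping changes.
 */
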
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
{
	pgste_t pgste;

	pgste = pgste_get_lock(ptep);
	/* notifier is called by the caller */
	ptep_flush_direct(mm, saddr, ptep);
	/* don't touch the storage key - it belongs to parent pgste */
	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
	pgste_set_unlock(ptep, pgste);
}

static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		dec_mm_counter(mm, mm_counter(page));
	}
	free_swap_and_cache(entry);
}

void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset)
{
	unsigned long pgstev;
	pgste_t pgste;
	pte_t pte;

	/* Zap unused and logically-zero pages */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	pte = *ptep;
	if (!reset && pte_swap(pte) &&
	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
	     (pgstev & _PGSTE_GPS_ZERO))) {
		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	if (reset)
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long ptev;
	pgste_t pgste;

	/* Clear storage key */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
			      PGSTE_GR_BIT | PGSTE_GC_BIT);
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/*
 * Test and reset if a guest page is dirty
 */
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgste_t pgste;
	pte_t *ptep;
	pte_t pte;
	bool dirty;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return false;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return false;
	/* We can't run guests backed by huge pages, but userspace can
	 * still set them up and then try to migrate them without any
	 * migration support.
	 */
	if (pmd_large(*pmd))
		return true;

	ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (unlikely(!ptep))
		return false;

	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);

	spin_unlock(ptl);
	return dirty;
}
EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);

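/*
 * Once reported dirty, the page is re-protected (or invalidated on
 * machines without ESOP) so that the next guest write faults and sets
 * PGSTE_UC_BIT again; that is what keeps the dirty log incremental.
 */
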
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq)
{
	unsigned long keyul;
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	keyul = (unsigned long) key;
	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

/**
 * Conditionally set a guest storage key (handling csske).
 * oldkey will be updated when either mr or mc is set and a pointer is given.
 *
 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
 * storage key was updated and -EFAULT on access errors.
 */
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc)
{
	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
	int rc;

	/* we can drop the pgste lock between getting and setting the key */
	if (mr | mc) {
		rc = get_guest_storage_key(current->mm, addr, &tmp);
		if (rc)
			return rc;
		if (oldkey)
			*oldkey = tmp;
		if (!mr)
			mask |= _PAGE_REFERENCED;
		if (!mc)
			mask |= _PAGE_CHANGED;
		if (!((tmp ^ key) & mask))
			return 0;
	}
	rc = set_guest_storage_key(current->mm, addr, key, nq);
	return rc < 0 ? rc : 1;
}
EXPORT_SYMBOL(cond_set_guest_storage_key);

/**
 * Reset a guest reference bit (rrbe), returning the reference and changed bit.
 *
 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 */
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;
	int cc = 0;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	/* Reset guest reference bit only */
	pgste_val(new) &= ~PGSTE_GR_BIT;

	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		cc = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
		/* Merge real referenced bit into host-set */
		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
	}
	/* Reflect guest's logical view, not physical */
	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
	/* Changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return cc;
}
EXPORT_SYMBOL(reset_guest_reference_bit);

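/*
 * get_guest_storage_key() and cond_set_guest_storage_key() back the
 * emulation of the storage-key instructions (iske, sske/csske, rrbe):
 * the value reported to the guest is the PGSTE copy, overridden by the
 * real key of the backing page whenever one is mapped.
 */
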
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key)
{
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	pgste = pgste_get_lock(ptep);
	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID))
		*key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);
	/* Reflect guest's logical view, not physical */
	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_guest_storage_key);
#endif /* CONFIG_PGSTE */