// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernate support specific for ARM64
 *
 * Derived from work on ARM hibernation support by:
 *
 * Ubuntu project, hibernation support for mach-dove
 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
 *  https://lkml.org/lkml/2010/6/18/4
 *  https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
 *  https://patchwork.kernel.org/patch/96442/
 *
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */
#define pr_fmt(x) "hibernate: " x
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/irqflags.h>
#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/sysreg.h>
#include <asm/trans_pgd.h>
#include <asm/virt.h>

/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't happen with KASLR.
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;

/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
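
/*
 * Note: when we booted at EL2 but run the kernel at EL1 (non-VHE), whatever
 * the boot kernel (or KVM) installed at EL2 is meaningless to the resumed
 * kernel, so EL2 has to be handed back to the hyp-stub during resume.
 */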

/* temporary el2 vectors in the __hibernate_exit_text section. */
extern char hibernate_el2_vectors[];

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];

/*
 * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */
static int sleep_cpu = -EINVAL;
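
/*
 * Resume must re-enter the saved context on the CPU it was created on; we
 * record that CPU's MPIDR in the image header and bring it up first on
 * resume.
 */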

/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * kernel.
 */
struct arch_hibernate_hdr_invariants {
	char		uts_version[__NEW_UTS_LEN + 1];
};

/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
	struct arch_hibernate_hdr_invariants invariants;

	/* These are needed to find the relocated kernel if built with kaslr */
	phys_addr_t	ttbr1_el1;
	void		(*reenter_kernel)(void);

	/*
	 * We need to know where the __hyp_stub_vectors are after restore to
	 * re-configure el2.
	 */
	phys_addr_t	__hyp_stub_vectors;

	u64		sleep_cpu_mpidr;
} resume_hdr;

static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	memset(i, 0, sizeof(*i));
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}
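
/*
 * init_utsname()->version is the "#<build> SMP <date>" banner, which changes
 * with every build, so comparing it reliably rejects images generated by any
 * other kernel binary.
 */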

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
	unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);

	return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
		crash_is_nosave(pfn);
}

void notrace save_processor_state(void)
{
	WARN_ON(num_online_cpus() != 1);
}

void notrace restore_processor_state(void)
{
}

int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	arch_hdr_invariants(&hdr->invariants);
	hdr->ttbr1_el1		= __pa_symbol(swapper_pg_dir);
	hdr->reenter_kernel	= _cpu_resume;

	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
	if (el2_reset_needed())
		hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
	else
		hdr->__hyp_stub_vectors = 0;

	/* Save the mpidr of the cpu we called cpu_suspend() on... */
	if (sleep_cpu < 0) {
		pr_err("Failing to hibernate on an unknown CPU.\n");
		return -ENODEV;
	}
	hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
	pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);

int arch_hibernation_header_restore(void *addr)
{
	int ret;
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;

	arch_hdr_invariants(&invariants);
	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
	pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);
	if (sleep_cpu < 0) {
		pr_crit("Hibernated on a CPU not known to this kernel!\n");
		sleep_cpu = -EINVAL;
		return -EINVAL;
	}

	ret = bringup_hibernate_cpu(sleep_cpu);
	if (ret) {
		sleep_cpu = -EINVAL;
		return ret;
	}

	resume_hdr = *hdr;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);
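
/*
 * Allocations made during restore must come from get_safe_page(), which only
 * hands back pages that do not overlap the image being restored, so they
 * survive the copy-back of the hibernation image.
 */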
static void *hibernate_page_alloc(void *arg)
{
	return (void *)get_safe_page((__force gfp_t)(unsigned long)arg);
}

/*
 * Copies length bytes, starting at src_start, into a new page, performs cache
 * maintenance, then maps it at a low address as executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K page
 * system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
				 phys_addr_t *phys_dst_addr)
{
	struct trans_pgd_info trans_info = {
		.trans_alloc_page	= hibernate_page_alloc,
		.trans_alloc_arg	= (__force void *)GFP_ATOMIC,
	};

	void *page = (void *)get_safe_page(GFP_ATOMIC);
	phys_addr_t trans_ttbr0;
	unsigned long t0sz;
	int rc;

	if (!page)
		return -ENOMEM;

	memcpy(page, src_start, length);
	caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);
	rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
	if (rc)
		return rc;
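
	/*
	 * The page was idmapped: the new ttbr0 tables map it at a virtual
	 * address equal to its physical address, so the relocated code can be
	 * entered by physical address and keep executing across the switch
	 * below.
	 */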

	/*
	 * Load our new page tables. A strict BBM approach requires that we
	 * ensure that TLBs are free of any entries that may overlap with the
	 * global mappings we are about to install.
	 *
	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
	 * runtime services), while for a userspace-driven test_resume cycle it
	 * points to userspace page tables (and we must point it at a zero page
	 * ourselves).
	 *
	 * We change T0SZ as part of installing the idmap. This is undone by
	 * cpu_uninstall_idmap() in __cpu_suspend_exit().
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	__cpu_set_tcr_t0sz(t0sz);
	write_sysreg(trans_ttbr0, ttbr0_el1);
	isb();

	*phys_dst_addr = virt_to_phys(page);

	return 0;
}

#ifdef CONFIG_ARM64_MTE

static DEFINE_XARRAY(mte_pages);
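
/*
 * Tag storage is kept in an xarray keyed by pfn, so that on restore each
 * saved block of tags can be matched back to the page it came from.
 */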

static int save_tags(struct page *page, unsigned long pfn)
{
	void *tag_storage, *ret;

	tag_storage = mte_allocate_tag_storage();
	if (!tag_storage)
		return -ENOMEM;

	mte_save_page_tags(page_address(page), tag_storage);

	ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
		mte_free_tag_storage(tag_storage);
		return xa_err(ret);
	} else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) {
		mte_free_tag_storage(ret);
	}

	return 0;
}

static void swsusp_mte_free_storage(void)
{
	XA_STATE(xa_state, &mte_pages, 0);
	void *tags;

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, ULONG_MAX) {
		mte_free_tag_storage(tags);
	}
	xa_unlock(&mte_pages);

	xa_destroy(&mte_pages);
}
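
/*
 * The hibernate core copies memory with normal loads and stores, which do not
 * carry MTE allocation tags, so the tags of every tagged page are saved off
 * to the side here and written back by swsusp_mte_restore_tags().
 */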
static int swsusp_mte_save_tags(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	int ret = 0;
	int n = 0;

	if (!system_supports_mte())
		return 0;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			if (!test_bit(PG_mte_tagged, &page->flags))
				continue;

			ret = save_tags(page, pfn);
			if (ret) {
				swsusp_mte_free_storage();
				goto out;
			}

			n++;
		}
	}
	pr_info("Saved %d MTE pages\n", n);

out:
	return ret;
}

static void swsusp_mte_restore_tags(void)
{
	XA_STATE(xa_state, &mte_pages, 0);
	int n = 0;
	void *tags;

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, ULONG_MAX) {
		unsigned long pfn = xa_state.xa_index;
		struct page *page = pfn_to_online_page(pfn);

		/*
		 * It is not required to invoke page_kasan_tag_reset(page)
		 * at this point since the tags stored in page->flags are
		 * already restored.
		 */
		mte_restore_page_tags(page_address(page), tags);

		mte_free_tag_storage(tags);
		n++;
	}
	xa_unlock(&mte_pages);

	pr_info("Restored %d MTE pages\n", n);

	xa_destroy(&mte_pages);
}

#else	/* CONFIG_ARM64_MTE */

static int swsusp_mte_save_tags(void)
{
	return 0;
}

static void swsusp_mte_restore_tags(void)
{
}

#endif	/* CONFIG_ARM64_MTE */

int swsusp_arch_suspend(void)
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
		return -EBUSY;
	}

	flags = local_daif_save();
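
	/*
	 * __cpu_suspend_enter() behaves like setjmp(): it returns non-zero on
	 * the first pass, when the image should be saved, and zero when this
	 * CPU context is re-entered via cpu_resume() after the image has been
	 * restored.
	 */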

	if (__cpu_suspend_enter(&state)) {
		/* make the crash dump kernel image visible/saveable */
		crash_prepare_suspend();

		ret = swsusp_mte_save_tags();
		if (ret)
			return ret;

		sleep_cpu = smp_processor_id();
		ret = swsusp_save();
	} else {
		/* Clean kernel core startup/idle code to PoC */
		dcache_clean_inval_poc((unsigned long)__mmuoff_data_start,
				       (unsigned long)__mmuoff_data_end);
		dcache_clean_inval_poc((unsigned long)__idmap_text_start,
				       (unsigned long)__idmap_text_end);

		/* Clean kvm setup code to PoC? */
		if (el2_reset_needed()) {
			dcache_clean_inval_poc(
				(unsigned long)__hyp_idmap_text_start,
				(unsigned long)__hyp_idmap_text_end);
			dcache_clean_inval_poc((unsigned long)__hyp_text_start,
					       (unsigned long)__hyp_text_end);
		}

		swsusp_mte_restore_tags();

		/* make the crash dump kernel image protected again */
		crash_post_resume();

		/*
		 * Tell the hibernation core that we've just restored
		 * the memory.
		 */
		in_suspend = 0;

		sleep_cpu = -EINVAL;
		__cpu_suspend_exit();

		/*
		 * Just in case the boot kernel did turn the SSBD
		 * mitigation off behind our back, let's set the state
		 * to what we expect it to be.
		 */
		spectre_v4_enable_mitigation(NULL);
	}

	local_daif_restore(flags);

	return ret;
}

/*
 * Set up, then resume from the hibernate image using swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate
 * core; we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
	int rc;
	void *zero_page;
	size_t exit_size;
	pgd_t *tmp_pg_dir;
	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
					  void *, phys_addr_t, phys_addr_t);
	struct trans_pgd_info trans_info = {
		.trans_alloc_page	= hibernate_page_alloc,
		.trans_alloc_arg	= (void *)GFP_ATOMIC,
	};

	/*
	 * Restoring the memory image will overwrite the ttbr1 page tables.
	 * Create a second copy of just the linear map, and use this when
	 * restoring.
	 */
	rc = trans_pgd_create_copy(&trans_info, &tmp_pg_dir, PAGE_OFFSET,
				   PAGE_END);
	if (rc)
		return rc;

	/*
	 * We need a zero page that is zero before & after resume in order to
	 * break-before-make on the ttbr1 page tables.
	 */
	zero_page = (void *)get_safe_page(GFP_ATOMIC);
	if (!zero_page) {
		pr_err("Failed to allocate zero page.\n");
		return -ENOMEM;
	}

	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
	/*
	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
	 * a new set of ttbr0 page tables and load them.
	 */
	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
				   (phys_addr_t *)&hibernate_exit);
	if (rc) {
		pr_err("Failed to create safe executable page for hibernate_exit code.\n");
		return rc;
	}

	/*
	 * The hibernate exit text contains a set of el2 vectors that will
	 * be executed at el2 with the mmu off in order to reload hyp-stub.
	 */
	dcache_clean_inval_poc((unsigned long)hibernate_exit,
			       (unsigned long)hibernate_exit + exit_size);
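
	/*
	 * The clean to PoC matters because the el2 vectors run with the MMU
	 * off: instruction fetches then bypass the data cache and must find
	 * the fresh copy in memory.
	 */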

	/*
	 * KASLR will cause the el2 vectors to be in a different location in
	 * the resumed kernel. Load hibernate's temporary copy into el2.
	 *
	 * We can skip this step if we booted at EL1, or are running with VHE.
	 */
	if (el2_reset_needed()) {
		phys_addr_t el2_vectors = (phys_addr_t)hibernate_exit;
		el2_vectors += hibernate_el2_vectors -
			       __hibernate_exit_text_start;	/* offset */

		__hyp_set_vectors(el2_vectors);
	}
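
	/*
	 * The six arguments mirror swsusp_arch_suspend_exit()'s parameters:
	 * the temporary ttbr1 tables to restore under, the ttbr1 value to
	 * reinstate afterwards, the kernel re-entry point, the list of pages
	 * to copy back, the physical hyp-stub vectors, and a zero page used
	 * for break-before-make.
	 */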

	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
		       resume_hdr.reenter_kernel, restore_pblist,
		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

	return 0;
}
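
/*
 * Called by the hibernate core during resume: offline every CPU except
 * sleep_cpu, so that the image is re-entered on the CPU it was created on.
 */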
int hibernate_resume_nonboot_cpu_disable(void)
{
	if (sleep_cpu < 0) {
		pr_err("Failing to resume from hibernate on an unknown CPU.\n");
		return -ENODEV;
	}

	return freeze_secondary_cpus(sleep_cpu);
}