/*
 * Suspend support specific for i386/x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/suspend.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode.h>
#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
struct saved_context saved_context;
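
/*
 * msr_save_context() latches the current values of all MSRs registered in
 * saved_context.saved_msrs; entries whose probe failed at registration
 * time (!valid) are skipped on both the save and restore paths.
 */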
static void msr_save_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			rdmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}
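
/*
 * Counterpart of msr_save_context(): write the latched values back.  This
 * runs late in resume, after microcode_bsp_resume(), because some of the
 * affected MSRs are "emulated" in microcode.
 */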
static void msr_restore_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			wrmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}
/**
 * __save_processor_state - save CPU registers before creating a
 *	hibernation image and before restoring the memory state from it
 * @ctxt: structure to store the registers contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (i.e. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (i.e. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function.  In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, then
 * kernel A's __save_processor_state() function must save all registers
 * needed by kernel A, so that it can operate correctly after the resume
 * regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/* descriptor tables */
	store_idt(&ctxt->idt);

	/*
	 * We save it here, but restore it only in the hibernate case.
	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
	 * mode in "secondary_startup_64". In 32-bit mode it is done via
	 * 'pmode_gdt' in wakeup_start.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());

	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/* segment registers */
#ifdef CONFIG_X86_32_LAZY_GS
	savesegment(gs, ctxt->gs);
#endif
#ifdef CONFIG_X86_64
	savesegment(gs, ctxt->gs);
	savesegment(fs, ctxt->fs);
	savesegment(ds, ctxt->ds);
	savesegment(es, ctxt->es);

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/* control registers */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();
#ifdef CONFIG_X86_64
	ctxt->cr8 = read_cr8();
#endif
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
	msr_save_context(ctxt);
}
/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif
static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_rw(cpu);
	tss_desc tss;
#endif

	/*
	 * We need to reload TR, which requires that we change the
	 * GDT entry to indicate "available" first.
	 *
	 * XXX: This could probably all be replaced by a call to
	 * force_reload_TR().
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);

#ifdef CONFIG_X86_64
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91). */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init(); /* This sets MSR_*STAR and related */
#else
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#endif
	load_TR_desc(); /* This does ltr */
	load_mm_ldt(current->active_mm); /* This does lldt */
	initialize_tlbstate_and_flush();

	fpu__resume_cpu();

	/* The processor is back on the direct GDT, load back the fixmap */
	load_fixmap_gdt(cpu);
}
/**
 * __restore_processor_state - restore the contents of CPU registers saved
 *	by __save_processor_state()
 * @ctxt: structure to load the registers contents from
 *
 * The asm code that gets us here will have restored a usable GDT, although
 * it will be pointing to the wrong alias.
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);

	/* control registers */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
/* CONFIG X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/* Restore the IDT. */
	load_idt(&ctxt->idt);

	/*
	 * Just in case the asm code got us here with the SS, DS, or ES
	 * out of sync with the GDT, update them.
	 */
	loadsegment(ss, __KERNEL_DS);
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	/*
	 * Restore percpu access.  Percpu access can happen in exception
	 * handlers or in complicated helpers like load_gs_index().
	 */
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
#else
	loadsegment(fs, __KERNEL_PERCPU);
	loadsegment(gs, __KERNEL_STACK_CANARY);
#endif

	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
	fix_processor_context();

	/*
	 * Now that we have descriptor tables fully restored and working
	 * exception handling, restore the usermode segments.
	 */
#ifdef CONFIG_X86_64
	loadsegment(ds, ctxt->es);
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	load_gs_index(ctxt->gs);

	/*
	 * Restore FSBASE and GSBASE after restoring the selectors, since
	 * restoring the selectors clobbers the bases.  Keep in mind
	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
	 */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#elif defined(CONFIG_X86_32_LAZY_GS)
	loadsegment(gs, ctxt->gs);
#endif

	do_fpu_end();
	tsc_verify_tsc_adjust(true);
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();

	microcode_bsp_resume();

	/*
	 * This needs to happen after the microcode has been updated upon resume
	 * because some of the MSRs are "emulated" in microcode.
	 */
	msr_restore_context(ctxt);
}
/* Needed by apm.c */
void notrace restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif
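
/*
 * On resume from hibernation, non-boot CPUs must not park in MWAIT: the
 * monitored address may be overwritten by image data (see the comment in
 * hibernate_resume_nonboot_cpu_disable() below), so a HLT-based play_dead()
 * variant is installed for the duration of disable_nonboot_cpus().
 */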
#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
static void resume_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);
	hlt_play_dead();
}
int hibernate_resume_nonboot_cpu_disable(void)
{
	void (*play_dead)(void) = smp_ops.play_dead;
	int ret;

	/*
	 * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
	 * during hibernate image restoration, because it is likely that the
	 * monitored address will be actually written to at that time and then
	 * the "dead" CPU will attempt to execute instructions again, but the
	 * address in its instruction pointer may not be possible to resolve
	 * any more at that point (the page tables used by it previously may
	 * have been overwritten by hibernate image data).
	 *
	 * First, make sure that we wake up all the potentially disabled SMT
	 * threads which have been initially brought up and then put into
	 * mwait/cpuidle sleep.
	 * Those will be put to proper (not interfering with hibernation
	 * resume) sleep afterwards, and the resumed kernel will decide itself
	 * what to do with them.
	 */
	ret = cpuhp_smt_enable();
	if (ret)
		return ret;

	smp_ops.play_dead = resume_play_dead;
	ret = disable_nonboot_cpus();
	smp_ops.play_dead = play_dead;

	return ret;
}
#endif
/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is disabled already, so it's unnecessary to handle race conditions
 * between cpumask query and cpu hotplug.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}
static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0 because
		 * 1. it's required for resume, and
		 * 2. the CPU was online before hibernation.
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * This code is called only when user space hibernation software
		 * prepares for snapshot device during boot time. So we just
		 * call _debug_hotplug_cpu() to restore CPU0's state prior to
		 * preparing the snapshot device.
		 *
		 * This works for the normal boot case in our CPU0 hotplug debug
		 * mode, i.e. CPU0 is offline and user mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the snapshot
		 * device after boot time, this will offline CPU0 and the user may
		 * see different CPU0 states before and after accessing
		 * the snapshot device. But hopefully this is not a case when
		 * the user is debugging CPU0 hotplug. Even if users hit this case,
		 * they can easily online CPU0 back.
		 *
		 * To simplify this debug code, we only consider the normal boot
		 * case. Otherwise we would need to remember CPU0's state, restore
		 * to that state, resolve racy conditions, etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}
static int __init bsp_pm_check_init(void)
{
	/*
	 * Set this bsp_pm_callback as lower priority than
	 * cpu_hotplug_pm_callback, so cpu_hotplug_pm_callback will be called
	 * earlier to disable cpu hotplug before the bsp online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);
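
/*
 * Append @num MSRs from @msr_id to the set saved/restored across suspend.
 * The array in saved_context is reallocated and previous registrations are
 * copied over, so multiple quirk callbacks may each add their own MSRs.
 */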
static int msr_build_context(const u32 *msr_id, const int num)
{
	struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
	struct saved_msr *msr_array;
	int total_num;
	int i, j;

	total_num = saved_msrs->num + num;

	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
	if (!msr_array) {
		pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n");
		return -ENOMEM;
	}

	if (saved_msrs->array) {
		/*
		 * Multiple callbacks can invoke this function, so copy any
		 * MSR save requests from previous invocations.
		 */
		memcpy(msr_array, saved_msrs->array,
		       sizeof(struct saved_msr) * saved_msrs->num);
		kfree(saved_msrs->array);
	}

	for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
		u64 dummy;

		msr_array[i].info.msr_no = msr_id[j];
		msr_array[i].valid = !rdmsrl_safe(msr_id[j], &dummy);
		msr_array[i].info.reg.q = 0;
	}
	saved_msrs->num = total_num;
	saved_msrs->array = msr_array;

	return 0;
}
/*
 * The following sections are a quirk framework for problematic BIOSen:
 * Sometimes MSRs are modified by the BIOS after suspend to RAM, which
 * might cause unexpected behavior after wakeup.
 * Thus we save/restore these specified MSRs across suspend/resume
 * in order to work around it.
 *
 * For any further problematic BIOSen/platforms,
 * please add your own function similar to msr_initialize_bdw.
 */
static int msr_initialize_bdw(const struct dmi_system_id *d)
{
	/* Add any extra MSR ids into this array. */
	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
	return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}
static const struct dmi_system_id msr_save_dmi_table[] = {
	{
		.callback = msr_initialize_bdw,
		.ident = "BROADWELL BDX_EP",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
		},
	},
	{}
};
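
/*
 * A sketch of how a new BIOS quirk would hook in ("foo" is a hypothetical
 * platform name; kept inside this comment so nothing extra is compiled in):
 *
 *	static int msr_initialize_foo(const struct dmi_system_id *d)
 *	{
 *		u32 foo_msr_id[] = { MSR_IA32_THERM_CONTROL };
 *
 *		pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n",
 *			d->ident);
 *		return msr_build_context(foo_msr_id, ARRAY_SIZE(foo_msr_id));
 *	}
 *
 * plus a matching { .callback = msr_initialize_foo, ... } entry in
 * msr_save_dmi_table above.
 */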
static int msr_save_cpuid_features(const struct x86_cpu_id *c)
{
	u32 cpuid_msr_id[] = {
		MSR_AMD64_CPUID_FN_1,
	};

	pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
		c->family);

	return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
}
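
/*
 * MSR_AMD64_CPUID_FN_1 shadows the feature bits reported by CPUID
 * Fn0000_0001, so saving it keeps kernel-applied CPUID adjustments in
 * effect across a suspend/resume cycle on the AMD families listed below.
 */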
static const struct x86_cpu_id msr_save_cpu_table[] = {
	{
		.vendor = X86_VENDOR_AMD,
		.family = 0x15,
		.model = X86_MODEL_ANY,
		.feature = X86_FEATURE_ANY,
		.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
	},
	{
		.vendor = X86_VENDOR_AMD,
		.family = 0x16,
		.model = X86_MODEL_ANY,
		.feature = X86_FEATURE_ANY,
		.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
	},
	{}
};
typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);

static int pm_cpu_check(const struct x86_cpu_id *c)
{
	const struct x86_cpu_id *m;
	int ret = 0;

	m = x86_match_cpu(msr_save_cpu_table);
	if (m) {
		pm_cpu_match_t fn;

		fn = (pm_cpu_match_t)m->driver_data;
		ret = fn(m);
	}

	return ret;
}
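
/*
 * Speculation-control MSRs are (re)written by the kernel during boot; save
 * and restore them so that any firmware-side changes across suspend do not
 * silently undo the selected mitigations.
 */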
static void pm_save_spec_msr(void)
{
	struct msr_enumeration {
		u32 msr_no;
		u32 feature;
	} msr_enum[] = {
		{ MSR_IA32_SPEC_CTRL,	 X86_FEATURE_MSR_SPEC_CTRL },
		{ MSR_IA32_TSX_CTRL,	 X86_FEATURE_MSR_TSX_CTRL },
		{ MSR_TSX_FORCE_ABORT,	 X86_FEATURE_TSX_FORCE_ABORT },
		{ MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL },
		{ MSR_AMD64_LS_CFG,	 X86_FEATURE_LS_CFG_SSBD },
		{ MSR_AMD64_DE_CFG,	 X86_FEATURE_LFENCE_RDTSC },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(msr_enum); i++) {
		if (boot_cpu_has(msr_enum[i].feature))
			msr_build_context(&msr_enum[i].msr_no, 1);
	}
}
static int pm_check_save_msr(void)
{
	dmi_check_system(msr_save_dmi_table);
	pm_cpu_check(msr_save_cpu_table);
	pm_save_spec_msr();

	return 0;
}

device_initcall(pm_check_save_msr);