/*
 * Suspend support specific for i386/x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/dmi.h>

#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/suspend.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>

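/*
 * The 32-bit wakeup assembly restores these registers directly by symbol
 * name, so they must stay visible at link time ("__visible" keeps LTO
 * from discarding them).
 */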
#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
struct saved_context saved_context;

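/*
 * Read every registered MSR into the save area; rdmsrl_safe() is used so
 * that an MSR which faults on this CPU is simply marked invalid and
 * skipped on restore instead of killing the suspend path.
 */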
static void msr_save_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
		msr++;
	}
}

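/* Write back only the MSRs that were read successfully at save time. */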
static void msr_restore_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			wrmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}

/**
 * __save_processor_state - save CPU registers before creating a
 *	hibernation image and before restoring the memory state from it
 * @ctxt: structure to store the register contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (i.e. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (i.e. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function.  In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, then
 * kernel A's __save_processor_state() function must save all registers
 * needed by kernel A, so that it can operate correctly after the resume
 * regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
#ifdef CONFIG_X86_32
	store_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
#endif
	/*
	 * We save it here, but restore it only in the hibernate case.
	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
	 * mode in "secondary_startup_64". In 32-bit mode it is done via
	 * 'pmode_gdt' in wakeup_start.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_table(smp_processor_id());

	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);
	savesegment(ss, ctxt->ss);
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
	ctxt->cr4 = __read_cr4_safe();
#ifdef CONFIG_X86_64
	ctxt->cr8 = read_cr8();
#endif
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
	msr_save_context(ctxt);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

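/*
 * Usage sketch: apm.c brackets its firmware suspend call with these
 * helpers so that any CPU state the firmware clobbers is rebuilt on the
 * way back, roughly (fw_suspend_call() is a hypothetical stand-in for the
 * actual APM BIOS call):
 *
 *	local_irq_disable();
 *	save_processor_state();
 *	err = fw_suspend_call();
 *	restore_processor_state();
 *	local_irq_enable();
 */
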
static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

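/*
 * Rebuild the per-CPU descriptor state (TSS, GDT entry, LDT, syscall MSRs)
 * that is reconstructed from scratch rather than restored verbatim from
 * saved_context.
 */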
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(cpu_tss, cpu);
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_table(cpu);
	tss_desc tss;
#endif
	set_tss_desc(cpu, t);	/*
				 * This just modifies memory; it should not be
				 * necessary. But... This is necessary, because
				 * 386 hardware has the concept of a busy TSS
				 * or some similar stupidity.
				 */

#ifdef CONFIG_X86_64
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91) */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init();				/* This sets MSR_*STAR and related */
#endif
	load_TR_desc();				/* This does ltr */
	load_mm_ldt(current->active_mm);	/* This does lldt */

	fpu__resume_cpu();
}

/**
 * __restore_processor_state - restore the contents of CPU registers saved
 *	by __save_processor_state()
 * @ctxt: structure to load the register contents from
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
#ifdef CONFIG_X86_32
	load_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
#endif

	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
#endif

	fix_processor_context();

	do_fpu_end();
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();
	msr_restore_context(ctxt);
}

/* Needed by apm.c */
void notrace restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is disabled already, so it's unnecessary to handle race conditions
 * between cpumask queries and cpu hotplug.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

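/*
 * PM notifier: veto suspend/hibernation while the boot CPU is offline
 * (resume is expected to run on CPU0) and, in CPU0 hotplug debug mode,
 * shuffle CPU0 back online/offline around restore.
 */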
static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0
		 * because
		 * 1. it's required for resume and
		 * 2. the CPU was online before hibernation
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * This code is called only when user space hibernation software
		 * prepares for the snapshot device during boot time. So we just
		 * call _debug_hotplug_cpu() to restore CPU0's state prior to
		 * preparing the snapshot device.
		 *
		 * This works for the normal boot case in our CPU0 hotplug
		 * debug mode, i.e. CPU0 is offline and user mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the
		 * snapshot device after boot time, this will offline CPU0 and
		 * the user may see a different CPU0 state before and after
		 * accessing the snapshot device. But hopefully that is not a
		 * case anyone hits while debugging CPU0 hotplug. Even if users
		 * hit this case, they can easily online CPU0 again.
		 *
		 * To simplify this debug code, we only consider the normal
		 * boot case. Otherwise we would need to remember CPU0's state,
		 * restore to that state, resolve races, etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Set this bsp_pm_callback as lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback is called
	 * earlier to disable cpu hotplug before the bsp online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);

static int msr_build_context(const u32 *msr_id, const int num)
{
	struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
	struct saved_msr *msr_array;
	int total_num;
	int i, j;

	total_num = saved_msrs->num + num;

	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
	if (!msr_array) {
		pr_err("x86/pm: Cannot allocate memory to save/restore MSRs during suspend.\n");
		return -ENOMEM;
	}

	if (saved_msrs->array) {
		/*
		 * Multiple callbacks can invoke this function, so copy any
		 * MSR save requests from previous invocations.
		 */
		memcpy(msr_array, saved_msrs->array,
		       sizeof(struct saved_msr) * saved_msrs->num);

		kfree(saved_msrs->array);
	}

	for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
		msr_array[i].info.msr_no	= msr_id[j];
		msr_array[i].valid		= false;
		msr_array[i].info.reg.q		= 0;
	}
	saved_msrs->num   = total_num;
	saved_msrs->array = msr_array;

	return 0;
}

/*
 * The following sections are a quirk framework for problematic BIOSen:
 * Sometimes MSRs are modified by the BIOSen after suspend to RAM, which
 * might cause unexpected behavior after wakeup.
 * Thus we save/restore these specified MSRs across suspend/resume
 * in order to work around it.
 *
 * For any further problematic BIOSen/platforms,
 * please add your own function similar to msr_initialize_bdw.
 */
static int msr_initialize_bdw(const struct dmi_system_id *d)
{
	/* Add any extra MSR ids into this array. */
	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
	return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}

static struct dmi_system_id msr_save_dmi_table[] = {
	{
		.callback = msr_initialize_bdw,
		.ident = "BROADWELL BDX_EP",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
		},
	},
	{}
};

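/*
 * A minimal sketch of a further quirk, assuming a hypothetical platform
 * "FOO" whose BIOS also clobbers MSR_IA32_THERM_CONTROL; it would only
 * need its own callback plus a dmi_system_id entry above:
 *
 *	static int msr_initialize_foo(const struct dmi_system_id *d)
 *	{
 *		u32 foo_msr_id[] = { MSR_IA32_THERM_CONTROL };
 *
 *		pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n",
 *			d->ident);
 *		return msr_build_context(foo_msr_id, ARRAY_SIZE(foo_msr_id));
 *	}
 */
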
static int msr_save_cpuid_features(const struct x86_cpu_id *c)
{
	u32 cpuid_msr_id[] = {
		MSR_AMD64_CPUID_FN_1,
	};

	pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
		c->family);

	return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
}

static const struct x86_cpu_id msr_save_cpu_table[] = {
	{
		.vendor = X86_VENDOR_AMD,
		.family = 0x15,
		.model = X86_MODEL_ANY,
		.feature = X86_FEATURE_ANY,
		.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
	},
	{
		.vendor = X86_VENDOR_AMD,
		.family = 0x16,
		.model = X86_MODEL_ANY,
		.feature = X86_FEATURE_ANY,
		.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
	},
	{}
};

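/*
 * pm_cpu_check() looks up the running CPU in msr_save_cpu_table and, on a
 * match, invokes the quirk callback stashed in ->driver_data.
 */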
typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);

static int pm_cpu_check(const struct x86_cpu_id *c)
{
	const struct x86_cpu_id *m;
	int ret = 0;

	m = x86_match_cpu(msr_save_cpu_table);
	if (m) {
		pm_cpu_match_t fn;

		fn = (pm_cpu_match_t)m->driver_data;
		ret = fn(m);
	}

	return ret;
}

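/* Run both the DMI based and the CPU-model based quirk checks once at boot. */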
static int pm_check_save_msr(void)
{
	dmi_check_system(msr_save_dmi_table);
	pm_cpu_check(msr_save_cpu_table);

	return 0;
}

device_initcall(pm_check_save_msr);