/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/sort.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/syscore_ops.h>
#include <acpi/processor.h>
/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif
#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS	"processor"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static unsigned int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
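
/*
 * Illustrative sketch (not part of the driver): latency_factor scales a
 * C-state's exit latency into the cpuidle target residency, exactly as done
 * in acpi_processor_setup_cpuidle_states() below.  The helper name here is
 * hypothetical.
 */
#if 0	/* example only */
static unsigned int example_target_residency(unsigned int exit_latency_us)
{
	/* with the default latency_factor of 2, 100us latency -> 200us residency */
	return exit_latency_us * latency_factor;
}
#endif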
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
								acpi_cstate);
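
/*
 * boot_option_idle_override reflects the "idle=" kernel parameter:
 * "idle=poll" selects IDLE_POLL, "idle=halt" selects IDLE_HALT and
 * "idle=nomwait" selects IDLE_NOMWAIT.  The first two take over the idle
 * loop entirely, so this driver backs off below.
 */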
static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
	       boot_option_idle_override == IDLE_HALT;
}
/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}
static const struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
	 (void *)1},
	{},
};
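
/*
 * Illustrative sketch (hypothetical helper): how the quirk table above is
 * consumed.  dmi_check_system(), called once from acpi_processor_power_init()
 * below, walks the table and invokes set_max_cstate() for each matching
 * machine, with ->driver_data carrying the C-state cap.
 */
#if 0	/* example only */
static void example_apply_cstate_quirks(void)
{
	/* runs set_max_cstate() for every entry matching this machine */
	dmi_check_system(processor_power_dmi_table);
}
#endif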
/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	if (!tif_need_resched()) {
		safe_halt();
		local_irq_disable();
	}
}
#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (amd_e400_c1e_detected)
		type = ACPI_STATE_C1;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}
static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = (struct acpi_processor *) arg;

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		tick_broadcast_enable();
	else
		tick_broadcast_disable();
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}
/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		if (broadcast)
			tick_broadcast_enter();
		else
			tick_broadcast_exit();
	}
}
#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
}

#endif
#ifdef CONFIG_PM_SLEEP
static u32 saved_bm_rld;

static int acpi_processor_suspend(void)
{
	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
	return 0;
}

static void acpi_processor_resume(void)
{
	u32 resumed_bm_rld = 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
	if (resumed_bm_rld == saved_bm_rld)
		return;

	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}

static struct syscore_ops acpi_processor_syscore_ops = {
	.suspend = acpi_processor_suspend,
	.resume = acpi_processor_resume,
};

void acpi_processor_syscore_init(void)
{
	register_syscore_ops(&acpi_processor_syscore_ops);
}

void acpi_processor_syscore_exit(void)
{
	unregister_syscore_ops(&acpi_processor_syscore_ops);
}
#endif /* CONFIG_PM_SLEEP */
#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;

		/* fall through */
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}
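
/*
 * Illustrative sketch (hypothetical helper): the pblk + 4 / pblk + 5
 * arithmetic above follows the ACPI processor block layout, where the
 * P_LVL2 register sits at offset 4 and P_LVL3 at offset 5 of P_BLK.
 */
#if 0	/* example only */
static u32 example_pblk_lvl_address(u32 pblk, int type)
{
	/* ACPI_STATE_C2 -> P_LVL2 (pblk + 4), ACPI_STATE_C3 -> P_LVL3 (pblk + 5) */
	return pblk + (type == ACPI_STATE_C2 ? 4 : 5);
}
#endif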
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;

	return 0;
}
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status;
	u64 count;
	int current_count;
	int i, ret = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		ret = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		ret = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (boot_option_idle_override == IDLE_NOMWAIT)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		ret = -EFAULT;

end:
	kfree(buffer.pointer);

	return ret;
}
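
/*
 * Illustrative sketch (hypothetical helper): the fixed shape of one _CST
 * entry as parsed above, a 4-element package of register buffer, type,
 * latency and power.
 */
#if 0	/* example only */
static void example_cst_entry_layout(union acpi_object *element)
{
	union acpi_object *reg     = &element->package.elements[0]; /* Buffer: struct acpi_power_register */
	union acpi_object *type    = &element->package.elements[1]; /* Integer: ACPI_STATE_C1..C3 */
	union acpi_object *latency = &element->package.elements[2]; /* Integer: worst-case latency, us */
	union acpi_object *power   = &element->package.elements[3]; /* Integer: average power, mW (unused here) */
}
#endif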
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;

	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU  */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy.
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3.  Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
}
static int acpi_cst_latency_cmp(const void *a, const void *b)
{
	const struct acpi_processor_cx *x = a, *y = b;

	if (!(x->valid && y->valid))
		return 0;
	if (x->latency > y->latency)
		return 1;
	if (x->latency < y->latency)
		return -1;
	return 0;
}
static void acpi_cst_latency_swap(void *a, void *b, int n)
{
	struct acpi_processor_cx *x = a, *y = b;
	u32 tmp;

	if (!(x->valid && y->valid))
		return;
	tmp = x->latency;
	x->latency = y->latency;
	y->latency = tmp;
}
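
/*
 * Note on the swap helper above: only the latency fields are exchanged, not
 * the whole acpi_processor_cx entries, so the C1 < C2 < C3 type ordering of
 * the states[] array is preserved while the latencies are put back in
 * ascending order.  lib/sort uses the two callbacks together, as in
 * acpi_processor_power_verify() below:
 */
#if 0	/* example only */
	sort(&pr->power.states[1], max_cstate,
	     sizeof(struct acpi_processor_cx),
	     acpi_cst_latency_cmp, acpi_cst_latency_swap);
#endif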
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;
	unsigned int last_latency = 0;
	unsigned int last_type = 0;
	bool buggy_latency = false;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;
		if (cx->type >= last_type && cx->latency < last_latency)
			buggy_latency = true;
		last_latency = cx->latency;
		last_type = cx->type;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);

		working++;
	}

	if (buggy_latency) {
		pr_notice("FW issue: working around C-state latencies out of order\n");
		sort(&pr->power.states[1], max_cstate,
		     sizeof(struct acpi_processor_cx),
		     acpi_cst_latency_cmp,
		     acpi_cst_latency_swap);
	}

	lapic_timer_propagate_broadcast(pr);

	return working;
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/*
	 * NOTE: the idle thread may not be running while calling this
	 * function.
	 */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}
/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}
/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		/*
		 * Dummy wait op - must do something useless after P_LVL2 read
		 * because chipsets cannot guarantee that STPCLK# signal gets
		 * asserted in time to freeze execution properly.
		 */
		inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}
/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {

		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			/* See comment in acpi_idle_do_entry() */
			inl(acpi_gbl_FADT.xpm_timer_block.address);
		} else
			return -ENODEV;
	}

	/* Never reached */
	return 0;
}
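
/*
 * Rationale for the check below: with CONFIG_HOTPLUG_CPU, CPUs may come
 * online after the C-state tables were parsed.  If the firmware provided no
 * _CST and the FADT does not advertise C2 as MP-safe, only C1 can be trusted
 * once more than one CPU is online; acpi_idle_enter() falls back accordingly.
 */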
static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}
static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @pr: Target processor
 * @cx: Target state context
 * @timer_bc: Whether or not to change timer mode to broadcast
 */
static void acpi_idle_enter_bm(struct acpi_processor *pr,
			       struct acpi_processor_cx *cx, bool timer_bc)
{
	acpi_unlazy_tlb(smp_processor_id());

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	if (timer_bc)
		lapic_timer_state_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	if (timer_bc)
		lapic_timer_state_broadcast(pr, cx, 0);
}
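
/*
 * Note: the c3_cpu_count bracket above makes ARB_DIS last-in/first-out.
 * Bus master arbitration is disabled only when the last online CPU enters
 * C3 and is re-enabled by the first CPU to wake up, so DMA is never cut
 * off while any CPU is still running.
 */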
static int acpi_idle_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = CPUIDLE_DRIVER_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
			if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
				acpi_idle_enter_bm(pr, cx, true);
				return index;
			} else if (drv->safe_state_index >= 0) {
				index = drv->safe_state_index;
				cx = per_cpu(acpi_cstate[index], dev->cpu);
			} else {
				acpi_safe_halt();
				return -EBUSY;
			}
		}
	}

	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}
static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	if (cx->type == ACPI_STATE_C3) {
		struct acpi_processor *pr = __this_cpu_read(processors);

		if (unlikely(!pr))
			return;

		if (pr->flags.bm_check) {
			acpi_idle_enter_bm(pr, cx, false);
			return;
		} else {
			ACPI_FLUSH_CPU_CACHE();
		}
	}
	acpi_idle_do_entry(cx);
}
struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};
/**
 * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	if (!dev)
		return -EINVAL;

	dev->cpu = pr->id;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	if (!count)
		return -EINVAL;

	return 0;
}
/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->enter = acpi_idle_enter;

		state->flags = 0;
		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
		}
		/*
		 * Halt-induced C1 is not good for ->enter_freeze, because it
		 * re-enables interrupts on exit.  Moreover, C1 is generally not
		 * particularly interesting from the suspend-to-idle angle, so
		 * avoid C1 and the situations in which we may need to fall back
		 * to it altogether.
		 */
		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
			state->enter_freeze = acpi_idle_enter_freeze;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle_cx(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME:  Design the ACPI notification to make it once per
	 * system instead of once per-cpu.  This condition is a hack
	 * to make the code that updates C-States be called once.
	 */

	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		/* Protect against cpu-hotplug */
		get_online_cpus();
		cpuidle_pause_and_lock();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_cx(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		cpuidle_resume_and_unlock();
		put_online_cpus();
	}

	return 0;
}
static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
	acpi_status status;
	int retval;
	struct cpuidle_device *dev;
	static int first_run;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on platforms
	 * that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
			       acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_cx(pr, dev);

		/*
		 * Register per-cpu cpuidle_device.  The cpuidle driver must
		 * already be registered before registering the device.
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}
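
/*
 * Illustrative sketch: the registration order enforced above.  The single
 * acpi_idle_driver must be registered (once, by the first processor) before
 * any per-cpu cpuidle_device may be registered against it.
 */
#if 0	/* example only */
	retval = cpuidle_register_driver(&acpi_idle_driver);	/* once */
	if (!retval)
		retval = cpuidle_register_device(dev);	/* per CPU, after the driver */
#endif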
int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}