// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerNV cpuidle code
 *
 * Copyright 2015 IBM Corp.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/cpu.h>

#include <asm/firmware.h>
#include <asm/interrupt.h>
#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/cpuidle.h>
#include <asm/code-patching.h>
#include <asm/smp.h>
#include <asm/runlatch.h>
#include <asm/dbell.h>

/* Power ISA 3.0 allows for stop states 0x0 - 0xF */
#define MAX_STOP_STATE	0xF

#define P9_STOP_SPR_MSR 2000
#define P9_STOP_SPR_PSSCR 855
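
/*
 * These constants are register identifiers handed to the OPAL stop-api
 * (opal_slw_set_reg() below). 855 is the architected SPR number of the
 * PSSCR; 2000 appears to be a firmware-defined token standing in for the
 * MSR, which is not an SPR.
 */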

static u32 supported_cpuidle_states;
struct pnv_idle_states_t *pnv_idle_states;
int nr_pnv_idle_states;

/*
 * The default stop state that will be used by the ppc_md.power_save
 * function on platforms that support the stop instruction.
 */
static u64 pnv_default_stop_val;
static u64 pnv_default_stop_mask;
static bool default_stop_found;

/*
 * First stop state levels when SPR and TB loss can occur.
 */
static u64 pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
static u64 deep_spr_loss_state = MAX_STOP_STATE + 1;

/*
 * psscr value and mask of the deepest stop idle state.
 * Used when a cpu is offlined.
 */
static u64 pnv_deepest_stop_psscr_val;
static u64 pnv_deepest_stop_psscr_mask;
static u64 pnv_deepest_stop_flag;
static bool deepest_stop_found;

static unsigned long power7_offline_type;

static int __init pnv_save_sprs_for_deep_states(void)
{
	unsigned int cpu;
	int rc;

	/*
	 * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
	 * all cpus at boot. Get these reg values of the current cpu and use
	 * the same across all cpus.
	 */
	uint64_t lpcr_val = mfspr(SPRN_LPCR);
	uint64_t hid0_val = mfspr(SPRN_HID0);
	uint64_t hmeer_val = mfspr(SPRN_HMEER);
	uint64_t msr_val = MSR_IDLE;
	uint64_t psscr_val = pnv_deepest_stop_psscr_val;

	for_each_present_cpu(cpu) {
		uint64_t pir = get_hard_smp_processor_id(cpu);
		uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu];

		rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
		if (rc != 0)
			return rc;
		rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
		if (rc != 0)
			return rc;

		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			rc = opal_slw_set_reg(pir, P9_STOP_SPR_MSR, msr_val);
			if (rc)
				return rc;
			rc = opal_slw_set_reg(pir,
					      P9_STOP_SPR_PSSCR, psscr_val);
			if (rc)
				return rc;
		}

		/* HIDs are per core registers */
		if (cpu_thread_in_core(cpu) == 0) {
			rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
			if (rc != 0)
				return rc;
			rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
			if (rc != 0)
				return rc;

			/* Only p8 needs to set extra HID registers */
			if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
				uint64_t hid1_val = mfspr(SPRN_HID1);
				uint64_t hid4_val = mfspr(SPRN_HID4);
				uint64_t hid5_val = mfspr(SPRN_HID5);

				rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
				if (rc != 0)
					return rc;
				rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
				if (rc != 0)
					return rc;
				rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
				if (rc != 0)
					return rc;
			}
		}
	}

	return 0;
}

u32 pnv_get_supported_cpuidle_states(void)
{
	return supported_cpuidle_states;
}
EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
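
/*
 * The cpuidle-powernv driver uses the flags returned here to decide which
 * of the discovered idle states it may register with the cpuidle core.
 */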

static void pnv_fastsleep_workaround_apply(void *info)
{
	int cpu = smp_processor_id();
	int rc;
	int *err = info;

	if (cpu_first_thread_sibling(cpu) != cpu)
		return;

	rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
					OPAL_CONFIG_IDLE_APPLY);
	if (rc)
		*err = 1;
}

static bool power7_fastsleep_workaround_entry = true;
static bool power7_fastsleep_workaround_exit = true;

/*
 * Used to store fastsleep workaround state
 * 0 - Workaround applied/undone at fastsleep entry/exit path (Default)
 * 1 - Workaround applied once, never undone.
 */
static u8 fastsleep_workaround_applyonce;

static ssize_t show_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", fastsleep_workaround_applyonce);
}

static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
					struct device_attribute *attr, const char *buf,
					size_t count)
{
	int err = 0;
	u8 val;

	if (kstrtou8(buf, 0, &val) || val != 1)
		return -EINVAL;

	if (fastsleep_workaround_applyonce == 1)
		return count;

	/*
	 * fastsleep_workaround_applyonce = 1 implies
	 * fastsleep workaround needs to be left in 'applied' state on all
	 * the cores. Do this by:
	 * 1. Disable the 'undo' workaround in fastsleep exit path
	 * 2. Send IPIs to all the cores which have at least one online thread
	 * 3. Disable the 'apply' workaround in fastsleep entry path
	 *
	 * There is no need to send an ipi to cores which have all threads
	 * offlined, as the last thread of the core entering fastsleep or a
	 * deeper state would have applied the workaround.
	 */
	power7_fastsleep_workaround_exit = false;

	cpus_read_lock();
	on_each_cpu(pnv_fastsleep_workaround_apply, &err, 1);
	cpus_read_unlock();
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
		goto fail;
	}

	power7_fastsleep_workaround_entry = false;

	fastsleep_workaround_applyonce = 1;

	return count;
fail:
	return -EIO;
}

static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
			show_fastsleep_workaround_applyonce,
			store_fastsleep_workaround_applyonce);
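
/*
 * The attribute is created on the cpu subsystem root, so on a typical
 * sysfs layout the workaround can be pinned from userspace with:
 *
 *	echo 1 > /sys/devices/system/cpu/fastsleep_workaround_applyonce
 *
 * Only the 0 -> 1 transition is accepted; once applied it is never undone.
 */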

static inline void atomic_start_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	int thread_nr = cpu_thread_in_core(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	clear_bit(thread_nr, state);
}

static inline void atomic_stop_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	int thread_nr = cpu_thread_in_core(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	set_bit(thread_nr, state);
}

static inline void atomic_lock_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	while (unlikely(test_and_set_bit_lock(NR_PNV_CORE_IDLE_LOCK_BIT, state)))
		barrier();
}

static inline void atomic_unlock_and_stop_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long thread = 1UL << cpu_thread_in_core(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	u64 s = READ_ONCE(*state);
	u64 new, tmp;

	BUG_ON(!(s & PNV_CORE_IDLE_LOCK_BIT));
	BUG_ON(s & thread);

again:
	new = (s | thread) & ~PNV_CORE_IDLE_LOCK_BIT;
	tmp = cmpxchg(state, s, new);
	if (unlikely(tmp != s)) {
		s = tmp;
		goto again;
	}
}

static inline void atomic_unlock_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	BUG_ON(!test_bit(NR_PNV_CORE_IDLE_LOCK_BIT, state));
	clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, state);
}
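
/*
 * An informal sketch of the per-core idle_state word that the helpers
 * above manipulate (field names are the asm/cpuidle.h macros used in this
 * file):
 *
 *	- one bit per thread in the low bits (set = thread awake), tested
 *	  against core_thread_mask,
 *	- a winkle entry counter at PNV_CORE_IDLE_WINKLE_COUNT_SHIFT,
 *	- per-thread winkle bits at PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT
 *	  marking threads that must do a full state restore, and
 *	- the NR_PNV_CORE_IDLE_LOCK_BIT lock bit serialising all updates.
 */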

struct p7_sprs {
	/* per core */
	u64 tscr;
	u64 worc;

	/* per subcore */
	u64 sdr1;
	u64 rpr;

	/* per thread */
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
	u64 purr;
	u64 spurr;
	u64 dscr;
	u64 wort;

	/* per thread SPRs that get lost in shallow states */
	u64 amr;
	u64 iamr;
	u64 uamor;
	/* amor is restored to constant ~0 */
};

static unsigned long power7_idle_insn(unsigned long type)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long thread = 1UL << cpu_thread_in_core(cpu);
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	bool full_winkle;
	struct p7_sprs sprs = {}; /* avoid false use-uninitialised */
	bool sprs_saved = false;
	int rc;

	if (unlikely(type != PNV_THREAD_NAP)) {
		atomic_lock_thread_idle();

		BUG_ON(!(*state & thread));
		*state &= ~thread;

		if (power7_fastsleep_workaround_entry) {
			if ((*state & core_thread_mask) == 0) {
				rc = opal_config_cpu_idle_state(
						OPAL_CONFIG_IDLE_FASTSLEEP,
						OPAL_CONFIG_IDLE_APPLY);
				BUG_ON(rc);
			}
		}

		if (type == PNV_THREAD_WINKLE) {
			sprs.tscr = mfspr(SPRN_TSCR);
			sprs.worc = mfspr(SPRN_WORC);

			sprs.sdr1 = mfspr(SPRN_SDR1);
			sprs.rpr = mfspr(SPRN_RPR);

			sprs.lpcr = mfspr(SPRN_LPCR);
			if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
				sprs.hfscr = mfspr(SPRN_HFSCR);
				sprs.fscr = mfspr(SPRN_FSCR);
			}
			sprs.purr = mfspr(SPRN_PURR);
			sprs.spurr = mfspr(SPRN_SPURR);
			sprs.dscr = mfspr(SPRN_DSCR);
			sprs.wort = mfspr(SPRN_WORT);

			sprs_saved = true;

			/*
			 * Increment winkle counter and set all winkle bits if
			 * all threads are winkling. This allows wakeup side to
			 * distinguish between fast sleep and winkle state
			 * loss. Fast sleep still has to resync the timebase so
			 * this may not be a really big win.
			 */
			*state += 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
			if ((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS)
					>> PNV_CORE_IDLE_WINKLE_COUNT_SHIFT
					== threads_per_core)
				*state |= PNV_CORE_IDLE_THREAD_WINKLE_BITS;
			WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
		}

		atomic_unlock_thread_idle();
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		sprs.amr = mfspr(SPRN_AMR);
		sprs.iamr = mfspr(SPRN_IAMR);
		sprs.uamor = mfspr(SPRN_UAMOR);
	}

	local_paca->thread_idle_state = type;
	srr1 = isa206_idle_insn_mayloss(type);		/* go idle */
	local_paca->thread_idle_state = PNV_THREAD_RUNNING;

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
			/*
			 * We don't need an isync after the mtsprs here because
			 * the upcoming mtmsrd is execution synchronizing.
			 */
			mtspr(SPRN_AMR, sprs.amr);
			mtspr(SPRN_IAMR, sprs.iamr);
			mtspr(SPRN_AMOR, ~0);
			mtspr(SPRN_UAMOR, sprs.uamor);
		}
	}

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	if (likely((srr1 & SRR1_WAKESTATE) != SRR1_WS_HVLOSS)) {
		if (unlikely(type != PNV_THREAD_NAP)) {
			atomic_lock_thread_idle();
			if (type == PNV_THREAD_WINKLE) {
				WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
				*state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
				*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
			}
			atomic_unlock_and_stop_thread_idle();
		}
		return srr1;
	}

	/* HV state loss */
	BUG_ON(type == PNV_THREAD_NAP);

	atomic_lock_thread_idle();

	full_winkle = false;
	if (type == PNV_THREAD_WINKLE) {
		WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
		*state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
		if (*state & (thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT)) {
			*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
			full_winkle = true;
			BUG_ON(!sprs_saved);
		}
	}

	WARN_ON(*state & thread);

	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* Per-core SPRs */
	if (full_winkle) {
		mtspr(SPRN_TSCR, sprs.tscr);
		mtspr(SPRN_WORC, sprs.worc);
	}

	if (power7_fastsleep_workaround_exit) {
		rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
						OPAL_CONFIG_IDLE_UNDO);
		BUG_ON(rc);
	}

	/* TB */
	if (opal_resync_timebase() != OPAL_SUCCESS)
		BUG();

core_woken:
	if (!full_winkle)
		goto subcore_woken;

	if ((*state & local_paca->subcore_sibling_mask) != 0)
		goto subcore_woken;

	/* Per-subcore SPRs */
	mtspr(SPRN_SDR1, sprs.sdr1);
	mtspr(SPRN_RPR, sprs.rpr);

subcore_woken:
	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();
	atomic_unlock_and_stop_thread_idle();

	/* Fast sleep does not lose SPRs */
	if (!full_winkle)
		return srr1;

	/* Per-thread SPRs */
	mtspr(SPRN_LPCR, sprs.lpcr);
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		mtspr(SPRN_HFSCR, sprs.hfscr);
		mtspr(SPRN_FSCR, sprs.fscr);
	}
	mtspr(SPRN_PURR, sprs.purr);
	mtspr(SPRN_SPURR, sprs.spurr);
	mtspr(SPRN_DSCR, sprs.dscr);
	mtspr(SPRN_WORT, sprs.wort);

	mtspr(SPRN_SPRG3, local_paca->sprg_vdso);

#ifdef CONFIG_PPC_64S_HASH_MMU
	/*
	 * The SLB has to be restored here, but it sometimes still
	 * contains entries, so the __ variant must be used to prevent
	 * multi hits.
	 */
	__slb_restore_bolted_realmode();
#endif

	return srr1;
}
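
/*
 * Informal summary of the wakeup handling above, as the code reads: the
 * SRR1_WAKESTATE field reports how much state the wakeup lost. For
 * SRR1_WS_NOLOSS nothing is restored; anything else restores the shallow
 * per-thread SPRs (AMR/IAMR/UAMOR); only SRR1_WS_HVLOSS takes the full
 * winkle path that also recovers per-core and per-subcore SPRs and
 * resyncs the timebase.
 */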

extern unsigned long idle_kvm_start_guest(unsigned long srr1);

#ifdef CONFIG_HOTPLUG_CPU
static unsigned long power7_offline(void)
{
	unsigned long srr1;

	mtmsr(MSR_IDLE);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle. */
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                 */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
#endif

	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(power7_offline_type);
	__ppc64_runlatch_on();

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
	/* Order setting hwthread_state vs. testing hwthread_req */
	smp_mb();
	if (local_paca->kvm_hstate.hwthread_req)
		srr1 = idle_kvm_start_guest(srr1);
#endif

	mtmsr(MSR_KERNEL);

	return srr1;
}
#endif

void power7_idle_type(unsigned long type)
{
	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return;

	mtmsr(MSR_IDLE);
	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(type);
	__ppc64_runlatch_on();
	mtmsr(MSR_KERNEL);

	fini_irq_for_idle_irqsoff();
	irq_set_pending_from_srr1(srr1);
}

static void power7_idle(void)
{
	if (!powersave_nap)
		return;

	power7_idle_type(PNV_THREAD_NAP);
}

struct p9_sprs {
	/* per core */
	u64 ptcr;
	u64 rpr;
	u64 tscr;
	u64 ldbar;

	/* per thread */
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
	u64 pid;
	u64 purr;
	u64 spurr;
	u64 dscr;
	u64 ciabr;

	u64 mmcra;
	u64 mmcr0;
	u64 mmcr1;
	u64 mmcr2;

	/* per thread SPRs that get lost in shallow states */
	u64 amr;
	u64 iamr;
	u64 uamor;
	/* amor is restored to constant ~0 */
};

static unsigned long power9_idle_stop(unsigned long psscr)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	unsigned long pls;
	unsigned long mmcr0 = 0;
	unsigned long mmcra = 0;
	struct p9_sprs sprs = {}; /* avoid false used-uninitialised */
	bool sprs_saved = false;

	if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
		/* EC=ESL=0 case */

		/*
		 * Wake synchronously. SRESET via xscom may still cause
		 * a 0x100 powersave wakeup with SRR1 reason!
		 */
		srr1 = isa300_idle_stop_noloss(psscr);		/* go idle */
		if (likely(!srr1))
			return 0;

		/*
		 * Registers not saved, can't recover!
		 * This would be a hardware bug
		 */
		BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS);

		goto out;
	}

	/* EC=ESL=1 case */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG)) {
		local_paca->requested_psscr = psscr;
		/* order setting requested_psscr vs testing dont_stop */
		smp_mb();
		if (atomic_read(&local_paca->dont_stop)) {
			local_paca->requested_psscr = 0;
			return 0;
		}
	}
#endif

	if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
		/*
		 * POWER9 DD2 can incorrectly set PMAO when waking up
		 * after a state-loss idle. Saving and restoring MMCR0
		 * over idle is a workaround.
		 */
		mmcr0 = mfspr(SPRN_MMCR0);
	}

	if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) {
		sprs.lpcr = mfspr(SPRN_LPCR);
		sprs.hfscr = mfspr(SPRN_HFSCR);
		sprs.fscr = mfspr(SPRN_FSCR);
		sprs.pid = mfspr(SPRN_PID);
		sprs.purr = mfspr(SPRN_PURR);
		sprs.spurr = mfspr(SPRN_SPURR);
		sprs.dscr = mfspr(SPRN_DSCR);
		sprs.ciabr = mfspr(SPRN_CIABR);

		sprs.mmcra = mfspr(SPRN_MMCRA);
		sprs.mmcr0 = mfspr(SPRN_MMCR0);
		sprs.mmcr1 = mfspr(SPRN_MMCR1);
		sprs.mmcr2 = mfspr(SPRN_MMCR2);

		sprs.ptcr = mfspr(SPRN_PTCR);
		sprs.rpr = mfspr(SPRN_RPR);
		sprs.tscr = mfspr(SPRN_TSCR);
		if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR))
			sprs.ldbar = mfspr(SPRN_LDBAR);

		sprs_saved = true;

		atomic_start_thread_idle();
	}

	sprs.amr = mfspr(SPRN_AMR);
	sprs.iamr = mfspr(SPRN_IAMR);
	sprs.uamor = mfspr(SPRN_UAMOR);

	srr1 = isa300_idle_stop_mayloss(psscr);		/* go idle */

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	local_paca->requested_psscr = 0;
#endif

	psscr = mfspr(SPRN_PSSCR);

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
		/*
		 * We don't need an isync after the mtsprs here because the
		 * upcoming mtmsrd is execution synchronizing.
		 */
		mtspr(SPRN_AMR, sprs.amr);
		mtspr(SPRN_IAMR, sprs.iamr);
		mtspr(SPRN_AMOR, ~0);
		mtspr(SPRN_UAMOR, sprs.uamor);

		/*
		 * Workaround for POWER9 DD2.0, if we lost resources, the ERAT
		 * might have been corrupted and needs flushing. We also need
		 * to reload MMCR0 (see mmcr0 comment above).
		 */
		if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
			asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT);
			mtspr(SPRN_MMCR0, mmcr0);
		}

		/*
		 * DD2.2 and earlier need to set then clear bit 60 in MMCRA
		 * to ensure the PMU starts running.
		 */
		mmcra = mfspr(SPRN_MMCRA);
		mmcra |= PPC_BIT(60);
		mtspr(SPRN_MMCRA, mmcra);
		mmcra &= ~PPC_BIT(60);
		mtspr(SPRN_MMCRA, mmcra);
	}

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	/*
	 * On POWER9, SRR1 bits do not match exactly as expected.
	 * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so
	 * just always test PSSCR for SPR/TB state loss.
	 */
	pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
	if (likely(pls < deep_spr_loss_state)) {
		if (sprs_saved)
			atomic_stop_thread_idle();
		goto out;
	}

	/* HV state loss */
	BUG_ON(!sprs_saved);

	atomic_lock_thread_idle();

	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* Per-core SPRs */
	mtspr(SPRN_PTCR, sprs.ptcr);
	mtspr(SPRN_RPR, sprs.rpr);
	mtspr(SPRN_TSCR, sprs.tscr);

	if (pls >= pnv_first_tb_loss_level) {
		/* TB loss */
		if (opal_resync_timebase() != OPAL_SUCCESS)
			BUG();
	}

	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();

core_woken:
	atomic_unlock_and_stop_thread_idle();

	/* Per-thread SPRs */
	mtspr(SPRN_LPCR, sprs.lpcr);
	mtspr(SPRN_HFSCR, sprs.hfscr);
	mtspr(SPRN_FSCR, sprs.fscr);
	mtspr(SPRN_PID, sprs.pid);
	mtspr(SPRN_PURR, sprs.purr);
	mtspr(SPRN_SPURR, sprs.spurr);
	mtspr(SPRN_DSCR, sprs.dscr);
	mtspr(SPRN_CIABR, sprs.ciabr);

	mtspr(SPRN_MMCRA, sprs.mmcra);
	mtspr(SPRN_MMCR0, sprs.mmcr0);
	mtspr(SPRN_MMCR1, sprs.mmcr1);
	mtspr(SPRN_MMCR2, sprs.mmcr2);
	if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR))
		mtspr(SPRN_LDBAR, sprs.ldbar);

	mtspr(SPRN_SPRG3, local_paca->sprg_vdso);

	if (!radix_enabled())
		__slb_restore_bolted_realmode();

out:
	mtmsr(MSR_KERNEL);

	return srr1;
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * This is used in working around bugs in thread reconfiguration
 * on POWER9 (at least up to Nimbus DD2.2) relating to transactional
 * memory and the way that XER[SO] is checkpointed.
 * This function forces the core into SMT4 by asking
 * all other threads not to stop, and sending a message to any
 * that are in a stop state.
 * Must be called with preemption disabled.
 */
void pnv_power9_force_smt4_catch(void)
{
	int cpu, cpu0, thr;
	int awake_threads = 1;		/* this thread is awake */
	int poke_threads = 0;
	int need_awake = threads_per_core;

	cpu = smp_processor_id();
	cpu0 = cpu & ~(threads_per_core - 1);
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_inc(&paca_ptrs[cpu0+thr]->dont_stop);
	}
	/* order setting dont_stop vs testing requested_psscr */
	smp_mb();
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (!paca_ptrs[cpu0+thr]->requested_psscr)
			++awake_threads;
		else
			poke_threads |= (1 << thr);
	}

	/* If at least 3 threads are awake, the core is in SMT4 already */
	if (awake_threads < need_awake) {
		/* We have to wake some threads; we'll use msgsnd */
		for (thr = 0; thr < threads_per_core; ++thr) {
			if (poke_threads & (1 << thr)) {
				ppc_msgsnd_sync();
				ppc_msgsnd(PPC_DBELL_MSGTYPE, 0,
					   paca_ptrs[cpu0+thr]->hw_cpu_id);
			}
		}
		/* now spin until at least 3 threads are awake */
		do {
			for (thr = 0; thr < threads_per_core; ++thr) {
				if ((poke_threads & (1 << thr)) &&
				    !paca_ptrs[cpu0+thr]->requested_psscr) {
					++awake_threads;
					poke_threads &= ~(1 << thr);
				}
			}
		} while (awake_threads < need_awake);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_catch);

void pnv_power9_force_smt4_release(void)
{
	int cpu, cpu0, thr;

	cpu = smp_processor_id();
	cpu0 = cpu & ~(threads_per_core - 1);

	/* clear all the dont_stop flags */
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_dec(&paca_ptrs[cpu0+thr]->dont_stop);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
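
/*
 * pnv_power9_force_smt4_catch() and pnv_power9_force_smt4_release() are
 * meant to be used as a bracketing pair: the KVM HV code takes the "catch"
 * before touching TM checkpointed state (the XER[SO] bug above) and drops
 * it with "release" afterwards, allowing the other threads to stop again.
 */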

struct p10_sprs {
	/*
	 * SPRs that get lost in shallow states:
	 *
	 * P10 loses CR, LR, CTR, FPSCR, VSCR, XER, TAR, SPRG2, and HSPRG1
	 * isa300 idle routines restore CR, LR.
	 * CTR is volatile
	 * idle thread doesn't use FP or VEC
	 * kernel doesn't use TAR
	 * HSPRG1 is only live in HV interrupt entry
	 * SPRG2 is only live in KVM guests, KVM handles it.
	 */
};

static unsigned long power10_idle_stop(unsigned long psscr)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	unsigned long pls;
//	struct p10_sprs sprs = {}; /* avoid false used-uninitialised */
	bool sprs_saved = false;

	if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
		/* EC=ESL=0 case */

		/*
		 * Wake synchronously. SRESET via xscom may still cause
		 * a 0x100 powersave wakeup with SRR1 reason!
		 */
		srr1 = isa300_idle_stop_noloss(psscr);		/* go idle */
		if (likely(!srr1))
			return 0;

		/*
		 * Registers not saved, can't recover!
		 * This would be a hardware bug
		 */
		BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS);

		goto out;
	}

	/* EC=ESL=1 case */
	if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) {
		/* XXX: save SPRs for deep state loss here. */

		sprs_saved = true;

		atomic_start_thread_idle();
	}

	srr1 = isa300_idle_stop_mayloss(psscr);		/* go idle */

	psscr = mfspr(SPRN_PSSCR);

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	/*
	 * On POWER10, SRR1 bits do not match exactly as expected.
	 * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so
	 * just always test PSSCR for SPR/TB state loss.
	 */
	pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
	if (likely(pls < deep_spr_loss_state)) {
		if (sprs_saved)
			atomic_stop_thread_idle();
		goto out;
	}

	/* HV state loss */
	BUG_ON(!sprs_saved);

	atomic_lock_thread_idle();

	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* XXX: restore per-core SPRs here */

	if (pls >= pnv_first_tb_loss_level) {
		/* TB loss */
		if (opal_resync_timebase() != OPAL_SUCCESS)
			BUG();
	}

	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();

core_woken:
	atomic_unlock_and_stop_thread_idle();

	/* XXX: restore per-thread SPRs here */

	if (!radix_enabled())
		__slb_restore_bolted_realmode();

out:
	mtmsr(MSR_KERNEL);

	return srr1;
}

#ifdef CONFIG_HOTPLUG_CPU
static unsigned long arch300_offline_stop(unsigned long psscr)
{
	unsigned long srr1;

	if (cpu_has_feature(CPU_FTR_ARCH_31))
		srr1 = power10_idle_stop(psscr);
	else
		srr1 = power9_idle_stop(psscr);

	return srr1;
}
#endif

void arch300_idle_type(unsigned long stop_psscr_val,
				      unsigned long stop_psscr_mask)
{
	unsigned long psscr;
	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return;

	psscr = mfspr(SPRN_PSSCR);
	psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;

	__ppc64_runlatch_off();
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		srr1 = power10_idle_stop(psscr);
	else
		srr1 = power9_idle_stop(psscr);
	__ppc64_runlatch_on();

	fini_irq_for_idle_irqsoff();

	irq_set_pending_from_srr1(srr1);
}

/*
 * Used for ppc_md.power_save which needs a function with no parameters
 */
static void arch300_idle(void)
{
	arch300_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
}

#ifdef CONFIG_HOTPLUG_CPU

void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
{
	u64 pir = get_hard_smp_processor_id(cpu);

	mtspr(SPRN_LPCR, lpcr_val);

	/*
	 * Program the LPCR via stop-api only if the deepest stop state
	 * can lose hypervisor context.
	 */
	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
		opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
}

/*
 * pnv_cpu_offline: A function that puts the CPU into the deepest
 * available platform idle state on a CPU-Offline.
 * Interrupts are hard disabled and there is no lazy irq pending here.
 */
unsigned long pnv_cpu_offline(unsigned int cpu)
{
	unsigned long srr1;

	__ppc64_runlatch_off();

	if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
		unsigned long psscr;

		psscr = mfspr(SPRN_PSSCR);
		psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
						pnv_deepest_stop_psscr_val;
		srr1 = arch300_offline_stop(psscr);
	} else if (cpu_has_feature(CPU_FTR_ARCH_206) && power7_offline_type) {
		srr1 = power7_offline();
	} else {
		/* This is the fallback method. We emulate snooze */
		while (!generic_check_cpu_restart(cpu)) {
			HMT_low();
			HMT_very_low();
		}
		srr1 = 0;
		HMT_medium();
	}

	__ppc64_runlatch_on();

	return srr1;
}
#endif

/*
 * Power ISA 3.0 idle initialization.
 *
 * POWER ISA 3.0 defines a new SPR Processor stop Status and Control
 * Register (PSSCR) to control idle behavior.
 *
 * PSSCR layout:
 * ----------------------------------------------------------
 * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
 * ----------------------------------------------------------
 * 0      4     41   42    43   44     48    54   56    60
 *
 * PSSCR key fields:
 *	Bits 0:3 - Power-Saving Level Status (PLS). This field indicates the
 *	lowest power-saving state the thread entered since the stop
 *	instruction was last executed.
 *
 *	Bit 41 - Status Disable (SD)
 *	0 - Shows PLS entries
 *	1 - PLS entries are all 0
 *
 *	Bit 42 - Enable State Loss
 *	0 - No state is lost irrespective of other fields
 *	1 - Allows state loss
 *
 *	Bit 43 - Exit Criterion
 *	0 - Exit from power-save mode on any interrupt
 *	1 - Exit from power-save mode controlled by LPCR's PECE bits
 *
 *	Bits 44:47 - Power-Saving Level Limit
 *	This limits the power-saving level that can be entered into.
 *
 *	Bits 60:63 - Requested Level
 *	Used to specify which power-saving level must be entered on
 *	executing the stop instruction.
 */
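
/*
 * For illustration only (the values are invented, not from firmware): a
 * state requesting RL = 3 with state loss enabled and LPCR-controlled
 * wakeup could carry psscr_val = PSSCR_ESL | PSSCR_EC | 3, with a
 * psscr_mask covering those fields. arch300_idle_type() merges such a
 * pair into the running PSSCR as (psscr & ~mask) | val.
 */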

int __init validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
{
	int err = 0;

	/*
	 * psscr_mask == 0xf indicates an older firmware.
	 * Set remaining fields of psscr to the default values.
	 * See NOTE above definition of PSSCR_HV_DEFAULT_VAL
	 */
	if (*psscr_mask == 0xf) {
		*psscr_val = *psscr_val | PSSCR_HV_DEFAULT_VAL;
		*psscr_mask = PSSCR_HV_DEFAULT_MASK;
		return err;
	}

	/*
	 * New firmware is expected to set the psscr_val bits correctly.
	 * Validate that the following invariants are correctly maintained by
	 * the new firmware.
	 * - ESL bit value matches the EC bit value.
	 * - ESL bit is set for all the deep stop states.
	 */
	if (GET_PSSCR_ESL(*psscr_val) != GET_PSSCR_EC(*psscr_val)) {
		err = ERR_EC_ESL_MISMATCH;
	} else if ((flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
		   GET_PSSCR_ESL(*psscr_val) == 0) {
		err = ERR_DEEP_STATE_ESL_MISMATCH;
	}

	return err;
}

/*
 * pnv_arch300_idle_init: Initializes the default idle state, first
 *                        deep idle state and deepest idle state on
 *                        ISA 3.0 CPUs, from the parsed device-tree idle
 *                        state table (pnv_idle_states).
 */
static void __init pnv_arch300_idle_init(void)
{
	u64 max_residency_ns = 0;
	int i;

	/* stop is not really architected, we only have p9,p10 drivers */
	if (!pvr_version_is(PVR_POWER10) && !pvr_version_is(PVR_POWER9))
		return;

	/*
	 * pnv_deepest_stop_{val,mask} should be set to values corresponding to
	 * the deepest stop state.
	 *
	 * pnv_default_stop_{val,mask} should be set to values corresponding to
	 * the deepest loss-less (OPAL_PM_STOP_INST_FAST) stop state.
	 */
	pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
	deep_spr_loss_state = MAX_STOP_STATE + 1;
	for (i = 0; i < nr_pnv_idle_states; i++) {
		int err;
		struct pnv_idle_states_t *state = &pnv_idle_states[i];
		u64 psscr_rl = state->psscr_val & PSSCR_RL_MASK;

		/* No deep loss driver implemented for POWER10 yet */
		if (pvr_version_is(PVR_POWER10) &&
				state->flags & (OPAL_PM_TIMEBASE_STOP|OPAL_PM_LOSE_FULL_CONTEXT))
			continue;

		if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
		     (pnv_first_tb_loss_level > psscr_rl))
			pnv_first_tb_loss_level = psscr_rl;

		if ((state->flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
		     (deep_spr_loss_state > psscr_rl))
			deep_spr_loss_state = psscr_rl;

		/*
		 * The idle code does not deal with TB loss occurring
		 * in a shallower state than SPR loss, so force it to
		 * behave like SPRs are lost if TB is lost. POWER9 would
		 * never encounter this, but a POWER8 core would if it
		 * implemented the stop instruction. So this is for forward
		 * compatibility.
		 */
		if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
		     (deep_spr_loss_state > psscr_rl))
			deep_spr_loss_state = psscr_rl;

		err = validate_psscr_val_mask(&state->psscr_val,
					      &state->psscr_mask,
					      state->flags);
		if (err) {
			report_invalid_psscr_val(state->psscr_val, err);
			continue;
		}

		state->valid = true;

		if (max_residency_ns < state->residency_ns) {
			max_residency_ns = state->residency_ns;
			pnv_deepest_stop_psscr_val = state->psscr_val;
			pnv_deepest_stop_psscr_mask = state->psscr_mask;
			pnv_deepest_stop_flag = state->flags;
			deepest_stop_found = true;
		}

		if (!default_stop_found &&
		    (state->flags & OPAL_PM_STOP_INST_FAST)) {
			pnv_default_stop_val = state->psscr_val;
			pnv_default_stop_mask = state->psscr_mask;
			default_stop_found = true;
			WARN_ON(state->flags & OPAL_PM_LOSE_FULL_CONTEXT);
		}
	}

	if (unlikely(!default_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable default stop state found. Disabling platform idle.\n");
	} else {
		ppc_md.power_save = arch300_idle;
		pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx, mask = 0x%016llx\n",
			pnv_default_stop_val, pnv_default_stop_mask);
	}

	if (unlikely(!deepest_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable stop state for CPU-Hotplug. Offlined CPUs will busy wait\n");
	} else {
		pr_info("cpuidle-powernv: Deepest stop: psscr = 0x%016llx, mask = 0x%016llx\n",
			pnv_deepest_stop_psscr_val,
			pnv_deepest_stop_psscr_mask);
	}

	pr_info("cpuidle-powernv: First stop level that may lose SPRs = 0x%llx\n",
		deep_spr_loss_state);

	pr_info("cpuidle-powernv: First stop level that may lose timebase = 0x%llx\n",
		pnv_first_tb_loss_level);
}

static void __init pnv_disable_deep_states(void)
{
	/*
	 * The stop-api is unable to restore hypervisor
	 * resources on wakeup from platform idle states which
	 * lose full context. So disable such states.
	 */
	supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
	pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
	pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");

	if (cpu_has_feature(CPU_FTR_ARCH_300) &&
	    (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
		/*
		 * Use the default stop state for CPU-Hotplug
		 * if available.
		 */
		if (default_stop_found) {
			pnv_deepest_stop_psscr_val = pnv_default_stop_val;
			pnv_deepest_stop_psscr_mask = pnv_default_stop_mask;
			pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
				pnv_deepest_stop_psscr_val);
		} else { /* Fallback to snooze loop for CPU-Hotplug */
			deepest_stop_found = false;
			pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
		}
	}
}

/*
 * Probe device tree for supported idle states
 */
static void __init pnv_probe_idle_states(void)
{
	int i;

	if (nr_pnv_idle_states < 0) {
		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
		return;
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pnv_arch300_idle_init();

	for (i = 0; i < nr_pnv_idle_states; i++)
		supported_cpuidle_states |= pnv_idle_states[i].flags;
}

/*
 * This function parses the device tree and populates all the information
 * into the pnv_idle_states structure. It also sets up nr_pnv_idle_states,
 * which is the number of cpuidle states discovered through the device tree.
 */
static int __init pnv_parse_cpuidle_dt(void)
{
	struct device_node *np;
	int nr_idle_states, i;
	int rc = 0;
	u32 *temp_u32;
	u64 *temp_u64;
	const char **temp_string;

	np = of_find_node_by_path("/ibm,opal/power-mgt");
	if (!np) {
		pr_warn("opal: PowerMgmt Node not found\n");
		return -ENODEV;
	}
	nr_idle_states = of_property_count_u32_elems(np,
						"ibm,cpu-idle-state-flags");

	pnv_idle_states = kcalloc(nr_idle_states, sizeof(*pnv_idle_states),
				  GFP_KERNEL);
	temp_u32 = kcalloc(nr_idle_states, sizeof(u32), GFP_KERNEL);
	temp_u64 = kcalloc(nr_idle_states, sizeof(u64), GFP_KERNEL);
	temp_string = kcalloc(nr_idle_states, sizeof(char *), GFP_KERNEL);

	if (!(pnv_idle_states && temp_u32 && temp_u64 && temp_string)) {
		pr_err("Could not allocate memory for dt parsing\n");
		rc = -ENOMEM;
		goto out;
	}

	/* Read flags */
	if (of_property_read_u32_array(np, "ibm,cpu-idle-state-flags",
				       temp_u32, nr_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
		rc = -EINVAL;
		goto out;
	}
	for (i = 0; i < nr_idle_states; i++)
		pnv_idle_states[i].flags = temp_u32[i];

	/* Read latencies */
	if (of_property_read_u32_array(np, "ibm,cpu-idle-state-latencies-ns",
				       temp_u32, nr_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
		rc = -EINVAL;
		goto out;
	}
	for (i = 0; i < nr_idle_states; i++)
		pnv_idle_states[i].latency_ns = temp_u32[i];

	/* Read residencies */
	if (of_property_read_u32_array(np, "ibm,cpu-idle-state-residency-ns",
				       temp_u32, nr_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n");
		rc = -EINVAL;
		goto out;
	}
	for (i = 0; i < nr_idle_states; i++)
		pnv_idle_states[i].residency_ns = temp_u32[i];

	/* For power9 and later */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		/* Read the psscr values */
		if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr",
					       temp_u64, nr_idle_states)) {
			pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
			rc = -EINVAL;
			goto out;
		}
		for (i = 0; i < nr_idle_states; i++)
			pnv_idle_states[i].psscr_val = temp_u64[i];

		/* Read the psscr masks */
		if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr-mask",
					       temp_u64, nr_idle_states)) {
			pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n");
			rc = -EINVAL;
			goto out;
		}
		for (i = 0; i < nr_idle_states; i++)
			pnv_idle_states[i].psscr_mask = temp_u64[i];
	}

	/*
	 * The power8 specific properties ibm,cpu-idle-state-pmicr-mask and
	 * ibm,cpu-idle-state-pmicr-val were never used and there is no
	 * plan to use them in the near future. Hence, they are not parsed.
	 */
	if (of_property_read_string_array(np, "ibm,cpu-idle-state-names",
					  temp_string, nr_idle_states) < 0) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
		rc = -EINVAL;
		goto out;
	}
	for (i = 0; i < nr_idle_states; i++)
		strlcpy(pnv_idle_states[i].name, temp_string[i],
			PNV_IDLE_NAME_LEN);
	nr_pnv_idle_states = nr_idle_states;
	rc = 0;
out:
	kfree(temp_u32);
	kfree(temp_u64);
	kfree(temp_string);
	of_node_put(np);
	return rc;
}
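
/*
 * A hypothetical /ibm,opal/power-mgt fragment of the shape this parser
 * expects (names and values invented for illustration):
 *
 *	power-mgt {
 *		ibm,cpu-idle-state-names = "stop0", "stop2";
 *		ibm,cpu-idle-state-flags = <0x00000000 0x00100000>;
 *		ibm,cpu-idle-state-latencies-ns = <1000 10000>;
 *		ibm,cpu-idle-state-residency-ns = <10000 100000>;
 *		ibm,cpu-idle-state-psscr = <0x0 0x330 0x0 0x332>;
 *		ibm,cpu-idle-state-psscr-mask = <0x0 0xf 0x0 0xf>;
 *	};
 */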

static int __init pnv_init_idle_states(void)
{
	int cpu;
	int rc = 0;

	/* Set up PACA fields */
	for_each_present_cpu(cpu) {
		struct paca_struct *p = paca_ptrs[cpu];

		p->idle_state = 0;
		if (cpu == cpu_first_thread_sibling(cpu))
			p->idle_state = (1 << threads_per_core) - 1;

		if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
			/* P7/P8 nap */
			p->thread_idle_state = PNV_THREAD_RUNNING;
		} else if (pvr_version_is(PVR_POWER9)) {
			/* P9 stop workarounds */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
			p->requested_psscr = 0;
			atomic_set(&p->dont_stop, 0);
#endif
		}
	}

	/* In case we error out nr_pnv_idle_states will be zero */
	nr_pnv_idle_states = 0;
	supported_cpuidle_states = 0;

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		goto out;
	rc = pnv_parse_cpuidle_dt();
	if (rc)
		return rc;
	pnv_probe_idle_states();

	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
			power7_fastsleep_workaround_entry = false;
			power7_fastsleep_workaround_exit = false;
		} else {
			/*
			 * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that
			 * a workaround is needed to use fastsleep. Provide
			 * sysfs control to choose how this workaround has to
			 * be applied.
			 */
			device_create_file(cpu_subsys.dev_root,
				&dev_attr_fastsleep_workaround_applyonce);
		}

		update_subcore_sibling_mask();

		if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED) {
			ppc_md.power_save = power7_idle;
			power7_offline_type = PNV_THREAD_NAP;
		}

		if ((supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED) &&
		    (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT))
			power7_offline_type = PNV_THREAD_WINKLE;
		else if ((supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED) ||
			 (supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1))
			power7_offline_type = PNV_THREAD_SLEEP;
	}

	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
		if (pnv_save_sprs_for_deep_states())
			pnv_disable_deep_states();
	}

out:
	return 0;
}
machine_subsys_initcall(powernv, pnv_init_idle_states);