/*
 * Copyright (C) 2014 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/idle.h>
#include <asm/mips-cps.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/pm-cps.h>
#include <asm/smp-cps.h>
#include <asm/uasm.h>

/*
 * cps_nc_entry_fn - type of a generated non-coherent state entry function
 * @online: the count of online coupled VPEs
 * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
 *
 * The code entering & exiting non-coherent states is generated at runtime
 * using uasm, in order to ensure that the compiler cannot insert a stray
 * memory access at an unfortunate time and to allow the generation of optimal
 * core-specific code particularly for cache routines. If coupled_coherence
 * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
 * returns the number of VPEs that were in the wait state at the point this
 * VPE left it. Returns garbage if coupled_coherence is zero or this is not
 * the entry function for CPS_PM_NC_WAIT.
 */
typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);

/*
 * The entry point of the generated non-coherent idle state entry/exit
 * functions. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
				  nc_asm_enter);

/* Bitmap indicating which states are supported by the system */
static DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);

/*
 * Indicates the number of coupled VPEs ready to operate in a non-coherent
 * state. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);

/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);

/*
 * Used to synchronize entry to deep idle states. Actually per-core rather
 * than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);

/* Saved CPU state across the CPS_PM_POWER_GATED state */
DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);

/* A somewhat arbitrary number of labels & relocs for uasm */
static struct uasm_label labels[32];
static struct uasm_reloc relocs[32];
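
/*
 * The MIPS general-purpose register numbers, named so that they can be used
 * directly as register operands when generating instructions with uasm.
 */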
enum mips_reg {
	zero, at, v0, v1, a0, a1, a2, a3,
	t0, t1, t2, t3, t4, t5, t6, t7,
	s0, s1, s2, s3, s4, s5, s6, s7,
	t8, t9, k0, k1, gp, sp, fp, ra,
};

bool cps_pm_support_state(enum cps_pm_state state)
{
	return test_bit(state, state_support);
}

static void coupled_barrier(atomic_t *a, unsigned online)
{
	/*
	 * This function is effectively the same as
	 * cpuidle_coupled_parallel_barrier, which can't be used here since
	 * there's no cpuidle device.
	 */

	if (!coupled_coherence)
		return;

	smp_mb__before_atomic();
	atomic_inc(a);
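
	/* Wait for all coupled CPUs to arrive at the barrier */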
	while (atomic_read(a) < online)
		cpu_relax();

	if (atomic_inc_return(a) == online * 2) {
		atomic_set(a, 0);
		return;
	}
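
	/* Otherwise wait until the last CPU has reset the barrier */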
	while (atomic_read(a) > online)
		cpu_relax();
}

int cps_pm_enter_state(enum cps_pm_state state)
{
	unsigned cpu = smp_processor_id();
	unsigned core = cpu_core(&current_cpu_data);
	unsigned online, left;
	cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
	u32 *core_ready_count, *nc_core_ready_count;
	void *nc_addr;
	cps_nc_entry_fn entry;
	struct core_boot_config *core_cfg;
	struct vpe_boot_config *vpe_cfg;

	/* Check that there is an entry function for this state */
	entry = per_cpu(nc_asm_enter, core)[state];
	if (!entry)
		return -EINVAL;

	/* Calculate which coupled CPUs (VPEs) are online */
#if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6)
	if (cpu_online(cpu)) {
		cpumask_and(coupled_mask, cpu_online_mask,
			    &cpu_sibling_map[cpu]);
		online = cpumask_weight(coupled_mask);
		cpumask_clear_cpu(cpu, coupled_mask);
	} else
#endif
	{
		cpumask_clear(coupled_mask);
		online = 1;
	}

	/* Setup the VPE to run mips_cps_pm_restore when started again */
	if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			return -EINVAL;

		core_cfg = &mips_cps_core_bootcfg[core];
		vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)];
		vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
		vpe_cfg->gp = (unsigned long)current_thread_info();
		vpe_cfg->sp = 0;
	}

	/* Indicate that this CPU might not be coherent */
	cpumask_clear_cpu(cpu, &cpu_coherent_mask);
	smp_mb__after_atomic();

	/* Create a non-coherent mapping of the core ready_count */
	core_ready_count = per_cpu(ready_count, core);
	nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
				   (unsigned long)core_ready_count);
	nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
	nc_core_ready_count = nc_addr;

	/* Ensure ready_count is zero-initialised before the assembly runs */
	ACCESS_ONCE(*nc_core_ready_count) = 0;
	coupled_barrier(&per_cpu(pm_barrier, core), online);

	/* Run the generated entry code */
	left = entry(online, nc_core_ready_count);

	/* Remove the non-coherent mapping of ready_count */
	kunmap_noncoherent();

	/* Indicate that this CPU is definitely coherent */
	cpumask_set_cpu(cpu, &cpu_coherent_mask);

	/*
	 * If this VPE is the first to leave the non-coherent wait state then
	 * it needs to wake up any coupled VPEs still running their wait
	 * instruction so that they return to cpuidle, which can then complete
	 * coordination between the coupled VPEs & provide the governor with
	 * a chance to reflect on the length of time the VPEs were in the
	 * idle state.
	 */
	if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
		arch_send_call_function_ipi_mask(coupled_mask);

	return 0;
}
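
/*
 * cps_gen_cache_routine - generate code to perform an op on a whole cache
 *
 * Generates uasm code which applies the given cache op to every line of the
 * described cache, unrolled by unroll_lines cache lines per loop iteration.
 */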
static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
				  struct uasm_reloc **pr,
				  const struct cache_desc *cache,
				  unsigned op, int lbl)
{
	unsigned cache_size = cache->ways << cache->waybit;
	unsigned i;
	const unsigned unroll_lines = 32;

	/* If the cache isn't present this function has it easy */
	if (cache->flags & MIPS_CACHE_NOT_PRESENT)
		return;

	/* Load base address */
	UASM_i_LA(pp, t0, (long)CKSEG0);

	/* Calculate end address */
	if (cache_size < 0x8000)
		uasm_i_addiu(pp, t1, t0, cache_size);
	else
		UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));

	/* Start of cache op loop */
	uasm_build_label(pl, *pp, lbl);

	/* Generate the cache ops */
	for (i = 0; i < unroll_lines; i++) {
		if (cpu_has_mips_r6) {
			uasm_i_cache(pp, op, 0, t0);
			uasm_i_addiu(pp, t0, t0, cache->linesz);
		} else {
			uasm_i_cache(pp, op, i * cache->linesz, t0);
		}
	}

	if (!cpu_has_mips_r6)
		/* Update the base address */
		uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);

	/* Loop if we haven't reached the end address yet */
	uasm_il_bne(pp, pr, t0, t1, lbl);
	uasm_i_nop(pp);
}
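
/*
 * cps_gen_flush_fsb - generate code to flush the fill/store buffer
 *
 * Generates uasm code which repeatedly loads & invalidates cache lines until
 * a performance counter indicates that the pipeline stalled because the FSB
 * was full, at which point the FSB can no longer be holding the results of a
 * prefetch. Returns zero on success (including where no workaround is
 * required) or a negative value where the workaround is needed but cannot be
 * applied.
 */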
static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
			     struct uasm_reloc **pr,
			     const struct cpuinfo_mips *cpu_info,
			     int lbl)
{
	unsigned i, fsb_size = 8;
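	/* More loads than FSB entries (1.5x), so the FSB should fill & stall */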
	unsigned num_loads = (fsb_size * 3) / 2;
	unsigned line_stride = 2;
	unsigned line_size = cpu_info->dcache.linesz;
	unsigned perf_counter, perf_event;
	unsigned revision = cpu_info->processor_id & PRID_REV_MASK;

	/*
	 * Determine whether this CPU requires an FSB flush, and if so which
	 * performance counter/event reflect stalls due to a full FSB.
	 */
	switch (__get_cpu_type(cpu_info->cputype)) {
	case CPU_INTERAPTIV:
		perf_counter = 1;
		perf_event = 51;
		break;

	case CPU_PROAPTIV:
		/* Newer proAptiv cores don't require this workaround */
		if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
			return 0;

		/* On older ones it's unavailable */
		return -1;

	default:
		/* Assume that the CPU does not need this workaround */
		return 0;
	}

	/*
	 * Ensure that the fill/store buffer (FSB) is not holding the results
	 * of a prefetch, since if it is then the CPC sequencer may become
	 * stuck in the D3 (ClrBus) state whilst entering a low power state.
	 */

	/* Preserve perf counter setup */
	uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */

	/* Setup perf counter to count FSB full pipeline stalls */
	uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
	uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
	uasm_i_ehb(pp);

	/* Base address for loads */
	UASM_i_LA(pp, t0, (long)CKSEG0);

	/* Start of clear loop */
	uasm_build_label(pl, *pp, lbl);

	/* Perform some loads to fill the FSB */
	for (i = 0; i < num_loads; i++)
		uasm_i_lw(pp, zero, i * line_size * line_stride, t0);

	/*
	 * Invalidate the new D-cache entries so that the cache will need
	 * refilling (via the FSB) if the loop is executed again.
	 */
	for (i = 0; i < num_loads; i++) {
		uasm_i_cache(pp, Hit_Invalidate_D,
			     i * line_size * line_stride, t0);
		uasm_i_cache(pp, Hit_Writeback_Inv_SD,
			     i * line_size * line_stride, t0);
	}

	/* Barrier ensuring previous cache invalidates are complete */
	uasm_i_sync(pp, STYPE_SYNC);
	uasm_i_ehb(pp);

	/* Check whether the pipeline stalled due to the FSB being full */
	uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */

	/* Loop if it didn't */
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);

	/* Restore perf counter 1. The count may well now be wrong... */
	uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
	uasm_i_ehb(pp);

	return 0;
}
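
/*
 * cps_gen_set_top_bit - generate code to set bit 31 of a 32-bit word
 *
 * Generates uasm code which atomically sets the top bit of the word at
 * r_addr using an ll/sc sequence, retrying until the sc succeeds. Used to
 * signal to other VPEs, via ready_count, that they may continue.
 */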
static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
				struct uasm_reloc **pr,
				unsigned r_addr, int lbl)
{
	uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
	uasm_build_label(pl, *pp, lbl);
	uasm_i_ll(pp, t1, 0, r_addr);
	uasm_i_or(pp, t1, t1, t0);
	uasm_i_sc(pp, t1, 0, r_addr);
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);
}

static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *buf, *p;
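	/*
	 * Register aliases: a0 & a1 hold the online count & nc_ready_count
	 * arguments on entry, whilst t7 holds the address of the coherence
	 * control register throughout.
	 */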
	const unsigned r_online = a0;
	const unsigned r_nc_count = a1;
	const unsigned r_pcohctl = t7;
	const unsigned max_instrs = 256;
	unsigned cpc_cmd;
	int err;
	enum {
		lbl_incready = 1,
		lbl_poll_cont,
		lbl_secondary_hang,
		lbl_disable_coherence,
		lbl_flush_fsb,
		lbl_invicache,
		lbl_flushdcache,
		lbl_hang,
		lbl_set_cont,
		lbl_secondary_cont,
		lbl_decready,
	};

	/* Allocate a buffer to hold the generated code */
	p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Clear labels & relocs ready for (re)use */
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			goto out_err;

		/*
		 * Save CPU state. Note the non-standard calling convention
		 * with the return address placed in v0 to avoid clobbering
		 * the ra register before it is saved.
		 */
		UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
		uasm_i_jalr(&p, v0, t0);
		uasm_i_nop(&p);
	}

	/*
	 * Load addresses of required CM & CPC registers. This is done early
	 * because they're needed in both the enable & disable coherence steps
	 * but in the coupled case the enable step will only run on one VPE.
	 */
	UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());

	if (coupled_coherence) {
		/* Increment ready_count */
		uasm_i_sync(&p, STYPE_SYNC_MB);
		uasm_build_label(&l, p, lbl_incready);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, 1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_incready);
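		/* Delay slot: leave the incremented ready_count value in t1 */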
		uasm_i_addiu(&p, t1, t1, 1);

		/* Barrier ensuring all CPUs see the updated r_nc_count value */
		uasm_i_sync(&p, STYPE_SYNC_MB);

		/*
		 * If this is the last VPE to become ready for non-coherence
		 * then it should branch below.
		 */
		uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
		uasm_i_nop(&p);

		if (state < CPS_PM_POWER_GATED) {
			/*
			 * Otherwise this is not the last VPE to become ready
			 * for non-coherence. It needs to wait until coherence
			 * has been disabled before proceeding, which it will do
			 * by polling for the top bit of ready_count being set.
			 */
			uasm_i_addiu(&p, t1, zero, -1);
			uasm_build_label(&l, p, lbl_poll_cont);
			uasm_i_lw(&p, t0, 0, r_nc_count);
			uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
			uasm_i_ehb(&p);
			if (cpu_has_mipsmt)
				uasm_i_yield(&p, zero, t1);
			uasm_il_b(&p, &r, lbl_poll_cont);
			uasm_i_nop(&p);
		} else {
			/*
			 * The core will lose power & this VPE will not continue
			 * so it can simply halt here.
			 */
			if (cpu_has_mipsmt) {
				/* Halt the VPE via C0 tchalt register */
				uasm_i_addiu(&p, t0, zero, TCHALT_H);
				uasm_i_mtc0(&p, t0, 2, 4);
			} else if (cpu_has_vp) {
				/* Halt the VP via the CPC VP_STOP register */
				unsigned int vpe_id;

				vpe_id = cpu_vpe_id(&cpu_data[cpu]);
				uasm_i_addiu(&p, t0, zero, 1 << vpe_id);
				UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop());
				uasm_i_sw(&p, t0, 0, t1);
			} else {
				BUG();
			}
			uasm_build_label(&l, p, lbl_secondary_hang);
			uasm_il_b(&p, &r, lbl_secondary_hang);
			uasm_i_nop(&p);
		}
	}

	/*
	 * This is the point of no return - this VPE will now proceed to
	 * disable coherence. At this point we *must* be sure that no other
	 * VPE within the core will interfere with the L1 dcache.
	 */
	uasm_build_label(&l, p, lbl_disable_coherence);

	/* Invalidate the L1 icache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
			      Index_Invalidate_I, lbl_invicache);

	/* Writeback & invalidate the L1 dcache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
			      Index_Writeback_Inv_D, lbl_flushdcache);

	/* Barrier ensuring previous cache invalidates are complete */
	uasm_i_sync(&p, STYPE_SYNC);
	uasm_i_ehb(&p);

	if (mips_cm_revision() < CM_REV_CM3) {
		/*
		 * Disable all but self interventions. The load from COHCTL is
		 * defined by the interAptiv & proAptiv SUMs as ensuring that
		 * the operation resulting from the preceding store is
		 * complete.
		 */
		uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu]));
		uasm_i_sw(&p, t0, 0, r_pcohctl);
		uasm_i_lw(&p, t0, 0, r_pcohctl);

		/* Barrier to ensure write to coherence control is complete */
		uasm_i_sync(&p, STYPE_SYNC);
		uasm_i_ehb(&p);
	}

	/* Disable coherence */
	uasm_i_sw(&p, zero, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	if (state >= CPS_PM_CLOCK_GATED) {
		err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
					lbl_flush_fsb);
		if (err)
			goto out_err;

		/* Determine the CPC command to issue */
		switch (state) {
		case CPS_PM_CLOCK_GATED:
			cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
			break;
		case CPS_PM_POWER_GATED:
			cpc_cmd = CPC_Cx_CMD_PWRDOWN;
			break;
		default:
			BUG();
			goto out_err;
		}

		/* Issue the CPC command */
		UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
		uasm_i_addiu(&p, t1, zero, cpc_cmd);
		uasm_i_sw(&p, t1, 0, t0);

		if (state == CPS_PM_POWER_GATED) {
			/* If anything goes wrong just hang */
			uasm_build_label(&l, p, lbl_hang);
			uasm_il_b(&p, &r, lbl_hang);
			uasm_i_nop(&p);

			/*
			 * There's no point generating more code, the core is
			 * powered down & if powered back up will run from the
			 * reset vector not from here.
			 */
			goto gen_done;
		}

		/* Barrier to ensure write to CPC command is complete */
		uasm_i_sync(&p, STYPE_SYNC);
		uasm_i_ehb(&p);
	}

	if (state == CPS_PM_NC_WAIT) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		if (coupled_coherence)
			cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
					    lbl_set_cont);

		/*
		 * VPEs which did not disable coherence will continue
		 * executing, after coherence has been disabled, from this
		 * point.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);
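
		/*
		 * Note that this state is only supported when cpu_wait is
		 * r4k_wait_irqoff (see cps_pm_init), ensuring interrupts are
		 * not handled whilst the VPE is non-coherent.
		 */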

		/* Now perform our wait */
		uasm_i_wait(&p, 0);
	}

	/*
	 * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
	 * will run this. The first will actually re-enable coherence & the
	 * rest will just be performing a rather unusual nop.
	 */
	uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3
				? CM_GCR_Cx_COHERENCE_COHDOMAINEN
				: CM3_GCR_Cx_COHERENCE_COHEN);

	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Barrier to ensure write to coherence control is complete */
	uasm_i_sync(&p, STYPE_SYNC);
	uasm_i_ehb(&p);

	if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
		/* Decrement ready_count */
		uasm_build_label(&l, p, lbl_decready);
		uasm_i_sync(&p, STYPE_SYNC_MB);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, -1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_decready);
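		/* Delay slot: place the masked pre-decrement count in v0 */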
		uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);

		/* Barrier ensuring all CPUs see the updated r_nc_count value */
		uasm_i_sync(&p, STYPE_SYNC_MB);
	}

	if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

		/*
		 * This core will be reliant upon another core sending a
		 * power-up command to the CPC in order to resume operation.
		 * Thus an arbitrary VPE can't trigger the core leaving the
		 * idle state and the one that disables coherence might as well
		 * be the one to re-enable it. The rest will continue from here
		 * after that has been done.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Barrier ensuring all CPUs see the updated r_nc_count value */
		uasm_i_sync(&p, STYPE_SYNC_MB);
	}

	/* The core is coherent, time to return to C code */
	uasm_i_jr(&p, ra);
	uasm_i_nop(&p);

gen_done:
	/* Ensure the code didn't exceed the resources allocated for it */
	BUG_ON((p - buf) > max_instrs);
	BUG_ON((l - labels) > ARRAY_SIZE(labels));
	BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

	/* Patch branch offsets */
	uasm_resolve_relocs(relocs, labels);

	/* Flush the icache */
	local_flush_icache_range((unsigned long)buf, (unsigned long)p);

	return buf;
out_err:
	kfree(buf);
	return NULL;
}

static int cps_pm_online_cpu(unsigned int cpu)
{
	enum cps_pm_state state;
	unsigned core = cpu_core(&cpu_data[cpu]);
	void *entry_fn, *core_rc;

	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
		if (per_cpu(nc_asm_enter, core)[state])
			continue;
		if (!test_bit(state, state_support))
			continue;

		entry_fn = cps_gen_entry_code(cpu, state);
		if (!entry_fn) {
			pr_err("Failed to generate core %u state %u entry\n",
			       core, state);
			clear_bit(state, state_support);
		}

		per_cpu(nc_asm_enter, core)[state] = entry_fn;
	}
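
	/*
	 * Allocate the core's shared ready_count if this is the first CPU of
	 * the core to come online; it is mapped non-coherently by
	 * cps_pm_enter_state().
	 */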
	if (!per_cpu(ready_count, core)) {
		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
		if (!core_rc) {
			pr_err("Failed to allocate core %u ready_count\n",
			       core);
			return -ENOMEM;
		}
		per_cpu(ready_count, core) = core_rc;
	}

	return 0;
}

static int __init cps_pm_init(void)
{
	/* A CM is required for all non-coherent states */
	if (!mips_cm_present()) {
		pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
		return 0;
	}

	/*
	 * If interrupts were enabled whilst running a wait instruction on a
	 * non-coherent core then the VPE may end up processing interrupts
	 * whilst non-coherent. That would be bad.
	 */
	if (cpu_wait == r4k_wait_irqoff)
		set_bit(CPS_PM_NC_WAIT, state_support);
	else
		pr_warn("pm-cps: non-coherent wait unavailable\n");

	/* Detect whether a CPC is present */
	if (mips_cpc_present()) {
		/* Detect whether clock gating is implemented */
		if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL)
			set_bit(CPS_PM_CLOCK_GATED, state_support);
		else
			pr_warn("pm-cps: CPC does not support clock gating\n");

		/* Power gating is available with CPS SMP & any CPC */
		if (mips_cps_smp_in_use())
			set_bit(CPS_PM_POWER_GATED, state_support);
		else
			pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
	} else {
		pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
	}

	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mips/cps_pm:online",
				 cps_pm_online_cpu, NULL);
}
arch_initcall(cps_pm_init);