/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/kgdb.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>
#include <asm/setup.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>
#include <asm/pcr.h>

#include "cpumap.h"
#include "kernel.h"
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS - 1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
EXPORT_SYMBOL(cpu_core_sib_map);
EXPORT_SYMBOL(cpu_core_sib_cache_map);

static cpumask_t smp_commenced_mask;
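/* smp_commenced_mask is the set of cpus that __cpu_up() has released to
 * run: a freshly booted cpu spins in smp_callin() until its bit shows up
 * here.  The sibling/core/socket maps above are filled in by
 * smp_fill_in_sib_core_maps() further down.
 */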
void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}
extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();

	set_cpu_online(cpuid, true);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}
/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave. -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_RAW_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0
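/* The go[] array below is the handshake area for tick synchronization:
 * go[MASTER] is the flag the slave raises to signal the master (and the
 * master clears to release the slave), while go[SLAVE] carries the
 * master's tick value for one measurement round.  SLAVE is one cache
 * line away from MASTER so the two words do not share a line.
 */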
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		membar_safe("#StoreLoad");
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}
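/* Note: get_delta() returns the difference between the midpoint of the
 * slave's best (shortest) round trip and the master's tick sample, i.e.
 * an estimate of slave_tick - master_tick at one instant; *rt is the
 * round-trip length and bounds the measurement error.
 */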
void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;
	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	for (i = 0; i < NUM_ROUNDS; i++) {
		delta = get_delta(&rt, &master_time_stamp);
		if (delta == 0)
			done = 1;	/* let's lock on to this... */

		if (!done) {
			if (i > 0) {
				adjust_latency += -delta;
				adj = -delta + adjust_latency/4;
			} else
				adj = -delta;

			tick_ops->add_tick(adj);
		}
#if DEBUG_TICK_SYNC
		t[i].rt = rt;
		t[i].master = master_time_stamp;
		t[i].diff = delta;
		t[i].lat = adjust_latency/4;
#endif
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}
static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;
	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	raw_spin_lock_irqsave(&itc_sync_lock, flags);
	for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
		while (!go[MASTER])
			rmb();
		go[MASTER] = 0;
		wmb();
		go[SLAVE] = tick_ops->get_tick();
		membar_safe("#StoreLoad");
	}
	raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
}
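/* The master holds itc_sync_lock for the whole answer loop above, so
 * only one tick-sync exchange is in flight at a time and concurrent cpu
 * bring-up cannot interleave writes to the shared go[] words.
 */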
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
				void **descrp)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}
	*descrp = hdesc;

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte   = tte_data;
		tte_vaddr += 0x400000;
		tte_data  += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif
extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	void *descr = NULL;
	int timeout, ret;

	callin_flag = 0;
	cpu_new_thread = task_thread_info(idle);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread,
					    &descr);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->phandle, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	kfree(descr);

	return ret;
}
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);

	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}
static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	u64 *mondo, data0, data1, data2;
	u16 *cpu_list;
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);
	data0 = mondo[0];
	data1 = mondo[1];
	data2 = mondo[2];
	for (i = 0; i < cnt; i++)
		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}
/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}
	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, n_done = 0;

					/* Mark the cpus dispatched in this
					 * batch as done, then resend to the
					 * rest of the list.
					 */
					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						if (++n_done == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));
		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;

				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}
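/* cpu_mondo_counter[cpuid] counts the CPU mondos a cpu has processed
 * (it is presumably bumped from the sun4v cpu mondo trap path).
 * hypervisor_xcall_deliver() below samples it to distinguish a target
 * cpu that is busy but still draining its mondo queue from one that is
 * making no forward progress at all.
 */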
#define	CPU_MONDO_COUNTER(cpuid)	(cpu_mondo_counter[cpuid])
#define	MONDO_USEC_WAIT_MIN		2
#define	MONDO_USEC_WAIT_MAX		100
#define	MONDO_RETRY_LIMIT		500000

/* Multi-cpu list version.
 *
 * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
 * Sometimes not all cpus receive the mondo, requiring us to re-send
 * it until all cpus have received it, or the cpus are truly stuck
 * unable to receive a mondo and we time out.
 * Occasionally a target cpu strand is borrowed briefly by the hypervisor
 * to perform guest service, such as PCIe error handling.  Considering
 * that service time, an overall wait of 1 second is reasonable for 1 cpu.
 * Two in-between mondo check wait times are defined here: 2 usec for a
 * single cpu quick turn around and up to 100 usec for a large cpu count.
 * Delivering the mondo to a large number of cpus can take longer, so we
 * adjust the retry count as long as target cpus are making forward
 * progress.
 */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int this_cpu, tot_cpus, prev_sent, i, rem;
	int usec_wait, retries, tot_retries;
	u16 first_cpu = 0xffff;
	unsigned long xc_rcvd = 0;
	unsigned long status;
	int ecpuerror_id = 0;
	int enocpu_id = 0;
	u16 *cpu_list;
	u16 cpu;

	this_cpu = smp_processor_id();
	cpu_list = __va(tb->cpu_list_pa);
	usec_wait = cnt * MONDO_USEC_WAIT_MIN;
	if (usec_wait > MONDO_USEC_WAIT_MAX)
		usec_wait = MONDO_USEC_WAIT_MAX;
	retries = tot_retries = 0;
	tot_cpus = cnt;
	prev_sent = 0;

	do {
		int n_sent, mondo_delivered, target_cpu_busy;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done. */
		if (likely(status == HV_EOK))
			goto xcall_done;

		/* If not these non-fatal errors, panic */
		if (unlikely((status != HV_EWOULDBLOCK) &&
			(status != HV_ECPUERROR) &&
			(status != HV_ENOCPU)))
			goto fatal_errors;

		/* First, see if we made any forward progress.
		 *
		 * Go through the cpu_list, count the target cpus that have
		 * received our mondo (n_sent), and those that did not (rem).
		 * Re-pack cpu_list with the cpus that remain to be retried in
		 * the front - this simplifies tracking the truly stalled cpus.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 *
		 * EWOULDBLOCK means some target cpus did not receive the
		 * mondo and retry usually helps.
		 *
		 * ECPUERROR means at least one target cpu is in error state,
		 * it's usually safe to skip the faulty cpu and retry.
		 *
		 * ENOCPU means one of the target cpus doesn't belong to the
		 * domain, perhaps offlined which is unexpected, but not
		 * fatal and it's okay to skip the offlined cpu.
		 */
		rem = 0;
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			cpu = cpu_list[i];
			if (likely(cpu == 0xffff)) {
				n_sent++;
			} else if ((status == HV_ECPUERROR) &&
				(sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
				ecpuerror_id = cpu + 1;
			} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
				enocpu_id = cpu + 1;
			} else {
				cpu_list[rem++] = cpu;
			}
		}

		/* No cpu remained, we're done. */
		if (rem == 0)
			break;

		/* Otherwise, update the cpu count for retry. */
		cnt = rem;

		/* Record the overall number of mondos received by the
		 * first of the remaining cpus.
		 */
		if (first_cpu != cpu_list[0]) {
			first_cpu = cpu_list[0];
			xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
		}

		/* Was any mondo delivered successfully? */
		mondo_delivered = (n_sent > prev_sent);
		prev_sent = n_sent;

		/* or, was any target cpu busy processing other mondos? */
		target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
		xc_rcvd = CPU_MONDO_COUNTER(first_cpu);

		/* Retry count is for no progress. If we're making progress,
		 * reset the retry count.
		 */
		if (likely(mondo_delivered || target_cpu_busy)) {
			tot_retries += retries;
			retries = 0;
		} else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
			goto fatal_mondo_timeout;
		}

		/* Delay a little bit to let other cpus catch up on
		 * their cpu mondo queue work.
		 */
		if (!mondo_delivered)
			udelay(usec_wait);

		retries++;
	} while (1);

xcall_done:
	if (unlikely(ecpuerror_id > 0)) {
		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
		       this_cpu, ecpuerror_id - 1);
	} else if (unlikely(enocpu_id > 0)) {
		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
		       this_cpu, enocpu_id - 1);
	}
	return;

fatal_errors:
	/* fatal errors include bad alignment, etc */
	pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
	       this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
	panic("Unexpected SUN4V mondo error %lu\n", status);

fatal_mondo_timeout:
	/* some cpus being non-responsive to the cpu mondo */
	pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
	       this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
	panic("SUN4V mondo timeout panic\n");
}
static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list. */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}
/* Send cross call to all processors mentioned in MASK_P
 * except self.  Really, there are only two cases currently,
 * "cpu_online_mask" and "mm_cpumask(mm)".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}
extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      cpumask_of(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}
static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
extern unsigned long xcall_fetch_glob_pmu_n4;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));

	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
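/* For the spitfire dcache flush xcall used below, bit 32 of data0 is set
 * when the page currently has a mapping; the handler appears to use this
 * to decide whether the I-cache must be flushed as well.
 */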
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	preempt_enable();
}
#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

void smp_fetch_global_pmu(void)
{
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
	else
		smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
}
/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * mm->cpu_vm_mask is a bit mask of which cpus an address
 * space has (potentially) executed on, this is the heuristic
 * we use to limit cross calls.
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);

	get_cpu();

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}
struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	struct tlb_pending_info info;

	get_cpu();

	info.ctx = ctx;
	info.nr = nr;
	info.vaddrs = vaddrs;

	smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
			       &info, 1);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long context = CTX_HWBITS(mm->context);

	get_cpu();

	smp_cross_call_masked(&xcall_flush_tlb_page,
			      context, vaddr, 0,
			      mm_cpumask(mm));

	__flush_tlb_page(context, vaddr);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

void smp_capture(void)
{
	int result = atomic_add_return(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}
/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}
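/* Capture protocol: smp_capture() bumps smp_capture_depth and, on the
 * first capture, raises penguins_are_doing_time and cross-calls
 * xcall_capture; every other cpu then parks in smp_penguin_jailcell()
 * above, checking in via smp_capture_registry, until smp_release()
 * drops penguins_are_doing_time again.
 */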
/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void smp_prepare_boot_cpu(void)
{
}

void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver_impl = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver_impl = cheetah_xcall_deliver;
	else
		xcall_deliver_impl = hypervisor_xcall_deliver;
}

void __init smp_fill_in_cpu_possible_map(void)
{
	int possible_cpus = num_possible_cpus();
	int i;

	if (possible_cpus > nr_cpu_ids)
		possible_cpus = nr_cpu_ids;

	for (i = 0; i < possible_cpus; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);
}
void smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpumask_set_cpu(i, &cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpumask_set_cpu(j, &cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		for_each_present_cpu(j) {
			if (cpu_data(i).max_cache_id ==
			    cpu_data(j).max_cache_id)
				cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);

			if (cpu_data(i).sock_id == cpu_data(j).sock_id)
				cpumask_set_cpu(j, &cpu_core_sib_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
		}
	}
}
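/* Summary of the topology maps built above: cpu_core_map groups cpus by
 * core_id, cpu_core_sib_cache_map by max_cache_id (shared last-level
 * cache), cpu_core_sib_map by sock_id (same socket), and the per-cpu
 * cpu_sibling_map by proc_id (hardware threads of one core).
 */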
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = smp_boot_one_cpu(cpu, tidle);

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
		if (!cpu_online(cpu)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpumask_clear_cpu(cpu, &smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr	%%pstate, %0\n\t"
		"wrpr	%0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu(i, &cpu_core_map[cpu])
		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
	cpumask_clear(&cpu_core_map[cpu]);

	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu. */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	set_cpu_online(cpu, false);

	cpu_map_rebuild();

	return 0;
}
void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				set_cpu_present(cpu, false);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif
void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
	if (cpu == smp_processor_id()) {
		WARN_ON_ONCE(preemptible());
		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
	} else {
		xcall_deliver((u64) &xcall_receive_signal,
			      0, 0, cpumask_of(cpu));
	}
}

void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	scheduler_ipi();
}
static void stop_this_cpu(void *dummy)
{
	prom_stopself();
}

void smp_send_stop(void)
{
	int cpu;

	if (tlb_type == hypervisor) {
		int this_cpu = smp_processor_id();
#ifdef CONFIG_SERIAL_SUNHV
		sunhv_migrate_hvcons_irq(this_cpu);
#endif
		for_each_online_cpu(cpu) {
			if (cpu == this_cpu)
				continue;
#ifdef CONFIG_SUN_LDOMS
			if (ldom_domaining_enabled) {
				unsigned long hv_err;
				hv_err = sun4v_cpu_stop(cpu);
				if (hv_err)
					printk(KERN_ERR "sun4v_cpu_stop() "
					       "failed err=%lu\n", hv_err);
			} else
#endif
				prom_stopcpu_cpuid(cpu);
		}
	} else
		smp_call_function(stop_this_cpu, NULL, 0);
}
/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
					size_t align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node(NODE_DATA(node),
					   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem(size, align, goal);
#endif
}
static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}
static void __init pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd)) {
		pud_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pgd_populate(&init_mm, pgd, new);
	}

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, new);
	}
}
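/* pcpu_populate_pte() is the populate_pte callback handed to
 * pcpu_page_first_chunk() below: it allocates any missing pud/pmd/pte
 * levels for a percpu vmalloc-space address so the percpu allocator can
 * install its page mappings there.
 */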
void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance,
					    pcpu_alloc_bootmem,
					    pcpu_free_bootmem);
		if (rc)
			pr_warning("PERCPU: %s allocator failed (%d), "
				   "falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   pcpu_alloc_bootmem,
					   pcpu_free_bootmem,
					   pcpu_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Setup %g5 for the boot cpu.  */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

	of_fill_in_cpu_data();
	if (tlb_type == hypervisor)
		mdesc_fill_in_cpu_data(cpu_all_mask);
}