// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>

#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cps.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];   /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

unsigned int smp_max_threads __initdata = UINT_MAX;
static int __init early_nosmt(char *s)
{
        smp_max_threads = 1;
        return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
        get_option(&s, &smp_max_threads);
        /* Ensure at least one thread is available */
        smp_max_threads = clamp_val(smp_max_threads, 1U, UINT_MAX);
        return 0;
}
early_param("smt", early_smt);
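/*
 * Usage note (illustrative, not part of the original source): booting with
 * "smt=2" caps each core at two hardware threads, while "nosmt" behaves like
 * "smt=1". Values below 1 are clamped so at least one thread per core stays
 * available.
 */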

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif
static inline void set_cpu_sibling_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu(i, &cpu_sibling_setup_map) {
                        if (cpus_are_siblings(cpu, i)) {
                                cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
                                cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
                        }
                }
        } else
                cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

static inline void set_cpu_core_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_core_setup_map);

        for_each_cpu(i, &cpu_core_setup_map) {
                if (cpu_data[cpu].package == cpu_data[i].package) {
                        cpumask_set_cpu(i, &cpu_core_map[cpu]);
                        cpumask_set_cpu(cpu, &cpu_core_map[i]);
                }
        }
}
/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
        int i, k, core_present;
        cpumask_t temp_foreign_map;

        /* Re-calculate the mask */
        cpumask_clear(&temp_foreign_map);
        for_each_online_cpu(i) {
                core_present = 0;
                for_each_cpu(k, &temp_foreign_map)
                        if (cpus_are_siblings(i, k))
                                core_present = 1;
                if (!core_present)
                        cpumask_set_cpu(i, &temp_foreign_map);
        }

        for_each_online_cpu(i)
                cpumask_andnot(&cpu_foreign_map[i],
                               &temp_foreign_map, &cpu_sibling_map[i]);
}
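/*
 * Worked example (illustrative, not part of the original source): on a
 * two-core system with two VPEs per core (CPUs 0/1 on core 0, CPUs 2/3 on
 * core 1, all online), temp_foreign_map keeps one CPU per core: {0, 2}.
 * Removing each CPU's own siblings then yields cpu_foreign_map[0] =
 * cpu_foreign_map[1] = {2} and cpu_foreign_map[2] = cpu_foreign_map[3] =
 * {0}, so a "foreign" IPI broadcast hits only one VPE per remote core.
 */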
const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(const struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}
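/*
 * Illustrative usage (an assumption, not taken from this file): platform
 * code registers its ops exactly once during early setup, e.g. an MT
 * platform's plat_smp_setup() calling register_smp_ops(&vsmp_smp_ops);
 * after that the generic SMP code dispatches boot, IPI and finish hooks
 * through mp_ops.
 */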
#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
        mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
        unsigned long flags;
        unsigned int core;
        int cpu;

        local_irq_save(flags);

        switch (action) {
        case SMP_CALL_FUNCTION:
                __ipi_send_mask(call_desc, mask);
                break;
        case SMP_RESCHEDULE_YOURSELF:
                __ipi_send_mask(sched_desc, mask);
                break;
        default:
                BUG();
        }

        if (mips_cpc_present()) {
                for_each_cpu(cpu, mask) {
                        if (cpus_are_siblings(cpu, smp_processor_id()))
                                continue;

                        core = cpu_core(&cpu_data[cpu]);

                        /* Power up a non-coherent core so it can take the IPI */
                        while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
                                mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
                                mips_cpc_lock_other(core);
                                write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
                                mips_cpc_unlock_other();
                                mips_cm_unlock_other();
                        }
                }
        }

        local_irq_restore(flags);
}
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
        scheduler_ipi();
        return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
        generic_smp_call_function_interrupt();
        return IRQ_HANDLED;
}

static void smp_ipi_init_one(unsigned int virq, const char *name,
                             irq_handler_t handler)
{
        int ret;

        irq_set_handler(virq, handle_percpu_irq);
        ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
        BUG_ON(ret);
}
static unsigned int call_virq, sched_virq;

int mips_smp_ipi_allocate(const struct cpumask *mask)
{
        int virq;
        struct irq_domain *ipidomain;
        struct device_node *node;

        node = of_irq_find_parent(of_root);
        ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

        /*
         * Some platforms have half DT setup. So if we found irq node but
         * didn't find an ipidomain, try to search for one that is not in the
         * DT.
         */
        if (node && !ipidomain)
                ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

        /*
         * There are systems which use IPI IRQ domains, but only have one
         * registered when some runtime condition is met. For example a Malta
         * kernel may include support for GIC & CPU interrupt controller IPI
         * IRQ domains, but if run on a system with no GIC & no MT ASE then
         * neither will be supported or registered.
         *
         * We only have a problem if we're actually using multiple CPUs so fail
         * loudly if that is the case. Otherwise simply return, skipping IPI
         * setup, if we're running with only a single CPU.
         */
        if (!ipidomain) {
                BUG_ON(num_present_cpus() > 1);
                return 0;
        }

        virq = irq_reserve_ipi(ipidomain, mask);
        BUG_ON(!virq);
        if (!call_virq)
                call_virq = virq;

        virq = irq_reserve_ipi(ipidomain, mask);
        BUG_ON(!virq);
        if (!sched_virq)
                sched_virq = virq;

        if (irq_domain_is_ipi_per_cpu(ipidomain)) {
                int cpu;

                for_each_cpu(cpu, mask) {
                        smp_ipi_init_one(call_virq + cpu, "IPI call",
                                         ipi_call_interrupt);
                        smp_ipi_init_one(sched_virq + cpu, "IPI resched",
                                         ipi_resched_interrupt);
                }
        } else {
                smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
                smp_ipi_init_one(sched_virq, "IPI resched",
                                 ipi_resched_interrupt);
        }

        return 0;
}
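/*
 * Note (descriptive, based on the code above): with a per-cpu IPI domain the
 * reserved range is indexed by CPU number (call_virq + cpu, sched_virq + cpu),
 * so mips_smp_ipi_free() below must be called with the same CPU mask that was
 * passed to mips_smp_ipi_allocate() for the request_irq()/free_irq() pairs to
 * match.
 */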
int mips_smp_ipi_free(const struct cpumask *mask)
{
        struct irq_domain *ipidomain;
        struct device_node *node;

        node = of_irq_find_parent(of_root);
        ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

        /*
         * Some platforms have half DT setup. So if we found irq node but
         * didn't find an ipidomain, try to search for one that is not in the
         * DT.
         */
        if (node && !ipidomain)
                ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

        BUG_ON(!ipidomain);

        if (irq_domain_is_ipi_per_cpu(ipidomain)) {
                int cpu;

                for_each_cpu(cpu, mask) {
                        free_irq(call_virq + cpu, NULL);
                        free_irq(sched_virq + cpu, NULL);
                }
        }
        irq_destroy_ipi(call_virq, mask);
        irq_destroy_ipi(sched_virq, mask);

        return 0;
}
static int __init mips_smp_ipi_init(void)
{
        if (num_possible_cpus() == 1)
                return 0;

        mips_smp_ipi_allocate(cpu_possible_mask);

        call_desc = irq_to_desc(call_virq);
        sched_desc = irq_to_desc(sched_virq);

        return 0;
}
early_initcall(mips_smp_ipi_init);
#endif
/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
        unsigned int cpu = raw_smp_processor_id();

        cpu_probe();
        per_cpu_trap_init(false);
        rcutree_report_cpu_starting(cpu);
        mips_clockevent_init();
        mp_ops->init_secondary();

        /*
         * XXX parity protection should be folded in here when it's converted
         * to an option instead of something based on .cputype
         */
        calibrate_delay();
        cpu_data[cpu].udelay_val = loops_per_jiffy;

        set_cpu_sibling_map(cpu);
        set_cpu_core_map(cpu);

        cpumask_set_cpu(cpu, &cpu_coherent_mask);
        notify_cpu_starting(cpu);

        /* Notify boot CPU that we're starting & ready to sync counters */
        complete(&cpu_starting);

        synchronise_count_slave(cpu);

        /* The CPU is running and counters synchronised, now mark it online */
        set_cpu_online(cpu, true);

        calculate_cpu_foreign_map();

        /*
         * Notify boot CPU that we're up & online and it can safely return
         * from __cpu_up
         */
        complete(&cpu_running);

        /*
         * irq will be enabled in ->smp_finish(), enabling it too early
         * will cause trouble.
         */
        WARN_ON_ONCE(!irqs_disabled());
        mp_ops->smp_finish();

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU:
         */

        set_cpu_online(smp_processor_id(), false);
        calculate_cpu_foreign_map();
        local_irq_disable();
        while (1);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
        mp_ops->prepare_cpus(max_cpus);
        set_cpu_sibling_map(0);
        set_cpu_core_map(0);
        calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
        cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}
/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
        if (mp_ops->prepare_boot_cpu)
                mp_ops->prepare_boot_cpu();
        set_cpu_possible(0, true);
        set_cpu_online(0, true);
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int err;

        err = mp_ops->boot_secondary(cpu, tidle);
        if (err)
                return err;

        /* Wait for CPU to start and be ready to sync counters */
        if (!wait_for_completion_timeout(&cpu_starting,
                                         msecs_to_jiffies(1000))) {
                pr_crit("CPU%u: failed to start\n", cpu);
                return -EIO;
        }

        synchronise_count_master(cpu);

        /* Wait for CPU to finish startup & mark itself online before return */
        wait_for_completion(&cpu_running);

        return 0;
}
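/*
 * Boot handshake summary (descriptive only, derived from __cpu_up() above
 * and start_secondary()):
 *
 *   boot CPU (__cpu_up)                  secondary CPU (start_secondary)
 *   mp_ops->boot_secondary()       --->  low-level startup, init_secondary()
 *   wait_for_completion(&cpu_starting)
 *                                  <---  complete(&cpu_starting)
 *   synchronise_count_master()     <-->  synchronise_count_slave()
 *   wait_for_completion(&cpu_running)
 *                                  <---  set_cpu_online(); complete(&cpu_running)
 *   return 0                             mp_ops->smp_finish(); idle loop
 */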

#ifdef CONFIG_PROFILING
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}
#endif
static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        if (cpu_has_mmid) {
                htw_stop();
                ginvt_full();
                sync_ginv();
                instruction_hazard();
                htw_start();
                return;
        }

        on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        drop_mmu_context((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 * o No return value
 * o collapses to normal function call on UP kernels
 * o collapses to normal function call on systems with a single shared
 *   primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
        smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
        preempt_disable();

        smp_on_other_tlbs(func, info);
        func(info);

        preempt_enable();
}
/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, inter-CPU interrupts have to be sent.
 * Another case where inter-CPU interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 */

void flush_tlb_mm(struct mm_struct *mm)
{
        if (atomic_read(&mm->mm_users) == 0)
                return;		/* happens as a result of exit_mmap() */

        preempt_disable();

        if (cpu_has_mmid) {
                /*
                 * No need to worry about other CPUs - the ginvt in
                 * drop_mmu_context() will be globalized.
                 */
        } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                set_cpu_context(cpu, mm, 0);
                }
        }
        drop_mmu_context(mm);

        preempt_enable();
}
struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long addr;
        u32 old_mmid;

        preempt_disable();
        if (cpu_has_mmid) {
                htw_stop();
                old_mmid = read_c0_memorymapid();
                write_c0_memorymapid(cpu_asid(0, mm));
                mtc0_tlbw_hazard();
                addr = round_down(start, PAGE_SIZE * 2);
                end = round_up(end, PAGE_SIZE * 2);
                do {
                        ginvt_va_mmid(addr);
                        sync_ginv();
                        addr += PAGE_SIZE * 2;
                } while (addr < end);
                write_c0_memorymapid(old_mmid);
                instruction_hazard();
                htw_start();
        } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = start,
                        .addr2 = end,
                };

                smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
                local_flush_tlb_range(vma, start, end);
        } else {
                unsigned int cpu;
                int exec = vma->vm_flags & VM_EXEC;

                for_each_online_cpu(cpu) {
                        /*
                         * flush_cache_range() will only fully flush icache if
                         * the VMA is executable, otherwise we must invalidate
                         * the ASID without it appearing to has_valid_asid() as
                         * if the mm has been completely unused by that CPU.
                         */
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                set_cpu_context(cpu, mm, !exec);
                }
                local_flush_tlb_range(vma, start, end);
        }
        preempt_enable();
}
static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd = {
                .addr1 = start,
                .addr2 = end,
        };

        on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}
static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        u32 old_mmid;

        preempt_disable();
        if (cpu_has_mmid) {
                htw_stop();
                old_mmid = read_c0_memorymapid();
                write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
                mtc0_tlbw_hazard();
                ginvt_va_mmid(page);
                sync_ginv();
                write_c0_memorymapid(old_mmid);
                instruction_hazard();
                htw_start();
        } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
                   (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = page,
                };

                smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
                local_flush_tlb_page(vma, page);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        /*
                         * flush_cache_page() only does partial flushes, so
                         * invalidate the ASID without it appearing to
                         * has_valid_asid() as if the mm has been completely
                         * unused by that CPU.
                         */
                        if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
                                set_cpu_context(cpu, vma->vm_mm, 1);
                }
                local_flush_tlb_page(vma, page);
        }
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        unsigned long vaddr = (unsigned long) info;

        local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
        smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
#ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{
        if (mp_ops->cleanup_dead_cpu)
                mp_ops->cleanup_dead_cpu(cpu);
}
#endif
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static void tick_broadcast_callee(void *info)
{
        tick_receive_broadcast();
}

static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd) =
        CSD_INIT(tick_broadcast_callee, NULL);

void tick_broadcast(const struct cpumask *mask)
{
        call_single_data_t *csd;
        int cpu;

        for_each_cpu(cpu, mask) {
                csd = &per_cpu(tick_broadcast_csd, cpu);
                smp_call_function_single_async(cpu, csd);
        }
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */