// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>
#include <linux/tracepoint.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>

#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/loongson.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/time.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of threads (siblings) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Representing the threads (siblings) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* Representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);
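
/*
 * Illustrative example (not part of the original source): with two online
 * two-thread cores, cpus {0,1} on one core and {2,3} on the other,
 * calculate_cpu_foreign_map() keeps one representative per core (say 0
 * and 2), so cpu_foreign_map[0] ends up as {2}: a shootdown from cpu 0
 * then IPIs one thread per foreign core instead of every online cpu.
 */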

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

struct secondary_data cpuboot_data;
static DEFINE_PER_CPU(int, cpu_state);

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNCTION,
};

static const char *ipi_types[NR_IPI] __tracepoint_string = {
	[IPI_RESCHEDULE] = "Rescheduling interrupts",
	[IPI_CALL_FUNCTION] = "Function call interrupts",
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
		seq_printf(p, " LoongArch %d %s\n", i + 1, ipi_types[i]);
	}
}

/* Send mailbox buffer via Mail_Send */
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
{
	uint64_t val;

	/* Send high 32 bits */
	val = IOCSR_MBUF_SEND_BLOCKING;
	val |= (IOCSR_MBUF_SEND_BOX_HI(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
	val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
	val |= (data & IOCSR_MBUF_SEND_H32_MASK);
	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);

	/* Send low 32 bits */
	val = IOCSR_MBUF_SEND_BLOCKING;
	val |= (IOCSR_MBUF_SEND_BOX_LO(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
	val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
	val |= (data << IOCSR_MBUF_SEND_BUF_SHIFT);
	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
}
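
/*
 * Note: only mailbox 0 is used in this file. loongson_prepare_cpus() clears
 * it for every present cpu, loongson_boot_secondary() stores the secondary
 * entry point in it, and an offlined cpu parked in play_dead() polls
 * LOONGARCH_IOCSR_MBUF0 until a nonzero entry address shows up.
 */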

static u32 ipi_read_clear(int cpu)
{
	u32 action;

	/* Load the ipi register to figure out what we're supposed to do */
	action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS);
	/* Clear the ipi register to clear the interrupt */
	iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR);
	smp_mb();

	return action;
}

static void ipi_write_action(int cpu, u32 action)
{
	unsigned int irq = 0;

	while ((irq = ffs(action))) {
		uint32_t val = IOCSR_IPI_SEND_BLOCKING;

		val |= (irq - 1);
		val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
		iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
		action &= ~BIT(irq - 1);
	}
}
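
/*
 * The 'action' passed to the senders below is a bitmask of SMP_* values
 * (e.g. SMP_RESCHEDULE, SMP_CALL_FUNCTION, SMP_BOOT_CPU): ipi_write_action()
 * walks it with ffs() and issues one IOCSR IPI send per set bit, so a
 * single call can deliver several vectors to the same target cpu.
 */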

void loongson_send_ipi_single(int cpu, unsigned int action)
{
	ipi_write_action(cpu_logical_map(cpu), (u32)action);
}

void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		ipi_write_action(cpu_logical_map(i), (u32)action);
}

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
{
	unsigned int action;
	unsigned int cpu = smp_processor_id();

	action = ipi_read_clear(cpu_logical_map(cpu));

	if (action & SMP_RESCHEDULE) {
		scheduler_ipi();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++;
	}

	if (action & SMP_CALL_FUNCTION) {
		generic_smp_call_function_interrupt();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
	}

	return IRQ_HANDLED;
}

void __init loongson_smp_setup(void)
{
	cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
	cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;

	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
	pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
}

void __init loongson_prepare_cpus(unsigned int max_cpus)
{
	int i = 0;

	for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
		set_cpu_present(i, true);
		csr_mail_send(0, __cpu_logical_map[i], 0);
	}

	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}

/*
 * Setup the PC, SP, and TP of a secondary processor and start it running!
 */
void loongson_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned long entry;

	pr_info("Booting CPU#%d...\n", cpu);

	entry = __pa_symbol((unsigned long)&smpboot_entry);
	cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle);
	cpuboot_data.thread_info = (unsigned long)task_thread_info(idle);

	csr_mail_send(entry, cpu_logical_map(cpu), 0);

	loongson_send_ipi_single(cpu, SMP_BOOT_CPU);
}
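
/*
 * The boot handshake, as wired up above: the physical address of
 * smpboot_entry goes into the target's mailbox 0, the new stack and thread
 * pointers are published via cpuboot_data, and SMP_BOOT_CPU wakes the
 * target, which picks them up and eventually enters start_secondary().
 * A replugged cpu takes the same path via the polling loop in play_dead().
 */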

/*
 * SMP init and finish on secondary CPUs
 */
void loongson_init_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
			     ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER;

	change_csr_ecfg(ECFG0_IM, imask);

	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);

#ifdef CONFIG_NUMA
	numa_add_cpu(cpu);
#endif
	per_cpu(cpu_state, cpu) = CPU_ONLINE;
	cpu_data[cpu].core =
		cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
	cpu_data[cpu].package =
		cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
}

void loongson_smp_finish(void)
{
	local_irq_enable();
	iocsr_write64(0, LOONGARCH_IOCSR_MBUF0);
	pr_info("CPU#%d finished\n", smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU

int loongson_cpu_disable(void)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (io_master(cpu))
		return -EBUSY;

#ifdef CONFIG_NUMA
	numa_remove_cpu(cpu);
#endif
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	local_irq_save(flags);
	irq_migrate_all_off_this_cpu();
	clear_csr_ecfg(ECFG0_IM);
	local_irq_restore(flags);
	local_flush_tlb_all();

	return 0;
}

void loongson_cpu_die(unsigned int cpu)
{
	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	mb();
}
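
/*
 * An offlined cpu parks below: it advertises CPU_DEAD (releasing the waiter
 * in loongson_cpu_die() above), then sits in 'idle' until a new entry
 * address appears in mailbox 0, converts that physical address to a cached
 * pointer with TO_CACHE() and jumps to it.
 */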
void play_dead(void)
{
	register uint64_t addr;
	register void (*init_fn)(void);

	idle_task_exit();
	local_irq_enable();
	set_csr_ecfg(ECFGF_IPI);
	__this_cpu_write(cpu_state, CPU_DEAD);

	__smp_mb();
	do {
		__asm__ __volatile__("idle 0\n\t");
		addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
	} while (addr == 0);

	local_irq_disable();
	init_fn = (void *)TO_CACHE(addr);
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);

	init_fn();
	unreachable();
}

#endif

#ifdef CONFIG_PM

static int loongson_ipi_suspend(void)
{
	return 0;
}

static void loongson_ipi_resume(void)
{
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
}

static struct syscore_ops loongson_ipi_syscore_ops = {
	.resume		= loongson_ipi_resume,
	.suspend	= loongson_ipi_suspend,
};

/*
 * Enable boot cpu ipi before enabling nonboot cpus
 * during syscore_resume.
 */
static int __init ipi_pm_init(void)
{
	register_syscore_ops(&loongson_ipi_syscore_ops);
	return 0;
}

core_initcall(ipi_pm_init);

#endif

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings <= 1)
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
	else {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpus_are_siblings(cpu, i)) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	}
}

static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}
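
/*
 * Note: the map is rebuilt from scratch on every transition; the callers
 * are start_secondary() on the way up and loongson_cpu_disable() and
 * stop_this_cpu() on the way down, so the representative chosen for a
 * core can change as its siblings come and go.
 */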

/* Preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	unsigned int cpu, node, rr_node;

	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	set_my_cpu_offset(per_cpu_offset(0));

	rr_node = first_node(node_online_map);
	for_each_possible_cpu(cpu) {
		node = early_cpu_to_node(cpu);

		/*
		 * The mapping between present cpus and nodes has been
		 * built during MADT and SRAT parsing.
		 *
		 * If possible cpus = present cpus here, early_cpu_to_node
		 * will return a valid node.
		 *
		 * If possible cpus > present cpus here (e.g. some possible
		 * cpus will be added by cpu-hotplug later), for possible but
		 * not present cpus, early_cpu_to_node will return NUMA_NO_NODE,
		 * and we just map them to online nodes in a round-robin way.
		 * Once hotplugged, a correct mapping will be built for them.
		 */
		if (node != NUMA_NO_NODE)
			set_cpu_numa_node(cpu, node);
		else {
			set_cpu_numa_node(cpu, rr_node);
			rr_node = next_node_in(rr_node, node_online_map);
		}
	}
}
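
/*
 * Illustrative example (not part of the original source): with nodes 0 and
 * 1 online and four possible-but-not-present cpus, the round-robin fallback
 * assigns them to nodes 0, 1, 0, 1; these placeholder mappings are replaced
 * with real ones as the cpus are actually hotplugged.
 */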

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	loongson_prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	loongson_boot_secondary(cpu, tidle);

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(5000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);

	return 0;
}
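
/*
 * The two completions pair up with start_secondary() below: cpu_starting
 * is completed as soon as the secondary reaches C code (bounded by the 5s
 * timeout above), cpu_running only once it has marked itself online, so a
 * successful __cpu_up() implies the new cpu is visible in cpu_online_mask.
 */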

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	sync_counter();
	cpu = raw_smp_processor_id();
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_probe();
	constant_clockevent_init();
	loongson_init_secondary();

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting */
	complete(&cpu_starting);

	/* The CPU is running, now mark it online */
	set_cpu_online(cpu, true);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up().
	 */
	complete(&cpu_running);

	/*
	 * irq will be enabled in loongson_smp_finish(), enabling it too
	 * early is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	loongson_smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static void stop_this_cpu(void *dummy)
{
	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (true);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		on_each_cpu_mask(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
		local_flush_tlb_mm(mm);
	}

	preempt_enable();
}
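
/*
 * Design note (an assumption about the mmu_context internals, not spelled
 * out in this file): zeroing cpu_context(cpu, mm) is a lazy shootdown.
 * Remote cpus that merely cached an ASID for mm are not interrupted; they
 * allocate a fresh ASID, flushing stale entries, the next time mm runs
 * there. Only the multi-user path has to IPI the cpus in mm_cpumask().
 */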

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		on_each_cpu_mask(mm_cpumask(mm), flush_tlb_range_ipi, &fd, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
		local_flush_tlb_range(vma, start, end);
	}
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		on_each_cpu_mask(mm_cpumask(vma->vm_mm), flush_tlb_page_ipi, &fd, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
		}
		local_flush_tlb_page(vma, page);
	}
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	on_each_cpu(flush_tlb_one_ipi, (void *)vaddr, 1);
}
EXPORT_SYMBOL(flush_tlb_one);