GNU Linux-libre 6.9.1-gnu
arch/mips/kernel/smp.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  * Copyright (C) 2000, 2001 Kanoj Sarcar
5  * Copyright (C) 2000, 2001 Ralf Baechle
6  * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
7  * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
8  */
9 #include <linux/cache.h>
10 #include <linux/delay.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/profile.h>
14 #include <linux/smp.h>
15 #include <linux/spinlock.h>
16 #include <linux/threads.h>
17 #include <linux/export.h>
18 #include <linux/time.h>
19 #include <linux/timex.h>
20 #include <linux/sched/mm.h>
21 #include <linux/cpumask.h>
22 #include <linux/cpu.h>
23 #include <linux/err.h>
24 #include <linux/ftrace.h>
25 #include <linux/irqdomain.h>
26 #include <linux/of.h>
27 #include <linux/of_irq.h>
28
29 #include <linux/atomic.h>
30 #include <asm/cpu.h>
31 #include <asm/ginvt.h>
32 #include <asm/processor.h>
33 #include <asm/idle.h>
34 #include <asm/r4k-timer.h>
35 #include <asm/mips-cps.h>
36 #include <asm/mmu_context.h>
37 #include <asm/time.h>
38 #include <asm/setup.h>
39 #include <asm/maar.h>
40
41 int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];   /* Map physical to logical */
42 EXPORT_SYMBOL(__cpu_number_map);
43
44 int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */
45 EXPORT_SYMBOL(__cpu_logical_map);
46
47 /* Number of TCs (or siblings in Intel speak) per CPU core */
48 int smp_num_siblings = 1;
49 EXPORT_SYMBOL(smp_num_siblings);
50
51 /* representing the TCs (or siblings in Intel speak) of each logical CPU */
52 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
53 EXPORT_SYMBOL(cpu_sibling_map);
54
55 /* representing the core map of multi-core chips of each logical CPU */
56 cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
57 EXPORT_SYMBOL(cpu_core_map);
58
59 static DECLARE_COMPLETION(cpu_starting);
60 static DECLARE_COMPLETION(cpu_running);
61
62 /*
63  * A logical cpu mask containing only one VPE per core to
64  * reduce the number of IPIs on large MT systems.
65  */
66 cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
67 EXPORT_SYMBOL(cpu_foreign_map);
68
69 /* representing cpus for which sibling maps can be computed */
70 static cpumask_t cpu_sibling_setup_map;
71
72 /* representing cpus for which core maps can be computed */
73 static cpumask_t cpu_core_setup_map;
74
75 cpumask_t cpu_coherent_mask;
76
77 unsigned int smp_max_threads __initdata = UINT_MAX;
78
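/*
 * "nosmt" / "smt=<n>" early parameters: cap the number of hardware
 * threads brought up per core.  smp_max_threads is consulted by the
 * platform SMP code (e.g. the CPS implementation) when it decides how
 * many VPEs per core to make available.
 */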
79 static int __init early_nosmt(char *s)
80 {
81         smp_max_threads = 1;
82         return 0;
83 }
84 early_param("nosmt", early_nosmt);
85
86 static int __init early_smt(char *s)
87 {
88         get_option(&s, &smp_max_threads);
89         /* Ensure at least one thread is available */
90         smp_max_threads = clamp_val(smp_max_threads, 1U, UINT_MAX);
91         return 0;
92 }
93 early_param("smt", early_smt);
94
95 #ifdef CONFIG_GENERIC_IRQ_IPI
96 static struct irq_desc *call_desc;
97 static struct irq_desc *sched_desc;
98 #endif
99
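/*
 * Record @cpu in cpu_sibling_setup_map and mark, symmetrically, every
 * already set-up CPU that shares a core with it in cpu_sibling_map[].
 * With a single thread per core the CPU is its own only sibling.
 */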
100 static inline void set_cpu_sibling_map(int cpu)
101 {
102         int i;
103
104         cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
105
106         if (smp_num_siblings > 1) {
107                 for_each_cpu(i, &cpu_sibling_setup_map) {
108                         if (cpus_are_siblings(cpu, i)) {
109                                 cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
110                                 cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
111                         }
112                 }
113         } else
114                 cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
115 }
116
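/*
 * Same idea as set_cpu_sibling_map(), but cpu_core_map[] groups CPUs
 * that share a physical package.
 */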
117 static inline void set_cpu_core_map(int cpu)
118 {
119         int i;
120
121         cpumask_set_cpu(cpu, &cpu_core_setup_map);
122
123         for_each_cpu(i, &cpu_core_setup_map) {
124                 if (cpu_data[cpu].package == cpu_data[i].package) {
125                         cpumask_set_cpu(i, &cpu_core_map[cpu]);
126                         cpumask_set_cpu(cpu, &cpu_core_map[i]);
127                 }
128         }
129 }
130
131 /*
132  * Calculate a new cpu_foreign_map mask whenever a
133  * new cpu appears or disappears.
134  */
135 void calculate_cpu_foreign_map(void)
136 {
137         int i, k, core_present;
138         cpumask_t temp_foreign_map;
139
140         /* Re-calculate the mask */
141         cpumask_clear(&temp_foreign_map);
142         for_each_online_cpu(i) {
143                 core_present = 0;
144                 for_each_cpu(k, &temp_foreign_map)
145                         if (cpus_are_siblings(i, k))
146                                 core_present = 1;
147                 if (!core_present)
148                         cpumask_set_cpu(i, &temp_foreign_map);
149         }
150
151         for_each_online_cpu(i)
152                 cpumask_andnot(&cpu_foreign_map[i],
153                                &temp_foreign_map, &cpu_sibling_map[i]);
154 }
155
156 const struct plat_smp_ops *mp_ops;
157 EXPORT_SYMBOL(mp_ops);
158
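/*
 * Install the platform's plat_smp_ops (boot_secondary, send_ipi_*,
 * smp_finish, ...).  Only one set of ops is active at a time; a later
 * registration overrides the earlier one with a warning.
 */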
159 void register_smp_ops(const struct plat_smp_ops *ops)
160 {
161         if (mp_ops)
162                 printk(KERN_WARNING "Overriding previously set SMP ops\n");
163
164         mp_ops = ops;
165 }
166
167 #ifdef CONFIG_GENERIC_IRQ_IPI
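/*
 * IPI transmit paths for platforms that route IPIs through a generic
 * IPI IRQ domain.  After raising the IPI, any target core that is not
 * yet coherent (absent from cpu_coherent_mask) is powered up through
 * the CPC so it can actually take the interrupt.
 */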
168 void mips_smp_send_ipi_single(int cpu, unsigned int action)
169 {
170         mips_smp_send_ipi_mask(cpumask_of(cpu), action);
171 }
172
173 void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
174 {
175         unsigned long flags;
176         unsigned int core;
177         int cpu;
178
179         local_irq_save(flags);
180
181         switch (action) {
182         case SMP_CALL_FUNCTION:
183                 __ipi_send_mask(call_desc, mask);
184                 break;
185
186         case SMP_RESCHEDULE_YOURSELF:
187                 __ipi_send_mask(sched_desc, mask);
188                 break;
189
190         default:
191                 BUG();
192         }
193
194         if (mips_cpc_present()) {
195                 for_each_cpu(cpu, mask) {
196                         if (cpus_are_siblings(cpu, smp_processor_id()))
197                                 continue;
198
199                         core = cpu_core(&cpu_data[cpu]);
200
201                         while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
202                                 mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
203                                 mips_cpc_lock_other(core);
204                                 write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
205                                 mips_cpc_unlock_other();
206                                 mips_cm_unlock_other();
207                         }
208                 }
209         }
210
211         local_irq_restore(flags);
212 }
213
214
215 static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
216 {
217         scheduler_ipi();
218
219         return IRQ_HANDLED;
220 }
221
222 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
223 {
224         generic_smp_call_function_interrupt();
225
226         return IRQ_HANDLED;
227 }
228
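/*
 * Wire up a single IPI virq: use the per-CPU flow handler and request
 * the IRQ.  IPIs are essential for SMP operation, so failure is fatal.
 */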
229 static void smp_ipi_init_one(unsigned int virq, const char *name,
230                              irq_handler_t handler)
231 {
232         int ret;
233
234         irq_set_handler(virq, handle_percpu_irq);
235         ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
236         BUG_ON(ret);
237 }
238
239 static unsigned int call_virq, sched_virq;
240
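/*
 * Reserve the two IPIs (function call and reschedule) from an IPI IRQ
 * domain covering @mask and install their handlers.  The domain is
 * looked up via the root interrupt parent, falling back to one that is
 * not described in the DT.
 */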
241 int mips_smp_ipi_allocate(const struct cpumask *mask)
242 {
243         int virq;
244         struct irq_domain *ipidomain;
245         struct device_node *node;
246
247         node = of_irq_find_parent(of_root);
248         ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
249
250         /*
251          * Some platforms only have a partial DT setup, so if we found an
252          * IRQ node but no matching ipidomain, search for one that is not
253          * described in the DT.
254          */
255         if (node && !ipidomain)
256                 ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
257
258         /*
259          * There are systems which use IPI IRQ domains, but only have one
260          * registered when some runtime condition is met. For example a Malta
261          * kernel may include support for GIC & CPU interrupt controller IPI
262          * IRQ domains, but if run on a system with no GIC & no MT ASE then
263          * neither will be supported or registered.
264          *
265          * We only have a problem if we're actually using multiple CPUs so fail
266          * loudly if that is the case. Otherwise simply return, skipping IPI
267          * setup, if we're running with only a single CPU.
268          */
269         if (!ipidomain) {
270                 BUG_ON(num_present_cpus() > 1);
271                 return 0;
272         }
273
274         virq = irq_reserve_ipi(ipidomain, mask);
275         BUG_ON(!virq);
276         if (!call_virq)
277                 call_virq = virq;
278
279         virq = irq_reserve_ipi(ipidomain, mask);
280         BUG_ON(!virq);
281         if (!sched_virq)
282                 sched_virq = virq;
283
284         if (irq_domain_is_ipi_per_cpu(ipidomain)) {
285                 int cpu;
286
287                 for_each_cpu(cpu, mask) {
288                         smp_ipi_init_one(call_virq + cpu, "IPI call",
289                                          ipi_call_interrupt);
290                         smp_ipi_init_one(sched_virq + cpu, "IPI resched",
291                                          ipi_resched_interrupt);
292                 }
293         } else {
294                 smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
295                 smp_ipi_init_one(sched_virq, "IPI resched",
296                                  ipi_resched_interrupt);
297         }
298
299         return 0;
300 }
301
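/*
 * Undo mips_smp_ipi_allocate() for the CPUs in @mask: free the per-CPU
 * handlers where the domain allocates one virq per CPU, then destroy
 * the reserved IPIs.
 */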
302 int mips_smp_ipi_free(const struct cpumask *mask)
303 {
304         struct irq_domain *ipidomain;
305         struct device_node *node;
306
307         node = of_irq_find_parent(of_root);
308         ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
309
310         /*
311          * Some platforms only have a partial DT setup, so if we found an
312          * IRQ node but no matching ipidomain, search for one that is not
313          * described in the DT.
314          */
315         if (node && !ipidomain)
316                 ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
317
318         BUG_ON(!ipidomain);
319
320         if (irq_domain_is_ipi_per_cpu(ipidomain)) {
321                 int cpu;
322
323                 for_each_cpu(cpu, mask) {
324                         free_irq(call_virq + cpu, NULL);
325                         free_irq(sched_virq + cpu, NULL);
326                 }
327         }
328         irq_destroy_ipi(call_virq, mask);
329         irq_destroy_ipi(sched_virq, mask);
330         return 0;
331 }
332
333
334 static int __init mips_smp_ipi_init(void)
335 {
336         if (num_possible_cpus() == 1)
337                 return 0;
338
339         mips_smp_ipi_allocate(cpu_possible_mask);
340
341         call_desc = irq_to_desc(call_virq);
342         sched_desc = irq_to_desc(sched_virq);
343
344         return 0;
345 }
346 early_initcall(mips_smp_ipi_init);
347 #endif
348
349 /*
350  * First C code run on the secondary CPUs after being started up by
351  * the master.
352  */
353 asmlinkage void start_secondary(void)
354 {
355         unsigned int cpu = raw_smp_processor_id();
356
357         cpu_probe();
358         per_cpu_trap_init(false);
359         rcutree_report_cpu_starting(cpu);
360         mips_clockevent_init();
361         mp_ops->init_secondary();
362         cpu_report();
363         maar_init();
364
365         /*
366          * XXX parity protection should be folded in here when it's converted
367          * to an option instead of something based on .cputype
368          */
369
370         calibrate_delay();
371         cpu_data[cpu].udelay_val = loops_per_jiffy;
372
373         set_cpu_sibling_map(cpu);
374         set_cpu_core_map(cpu);
375
376         cpumask_set_cpu(cpu, &cpu_coherent_mask);
377         notify_cpu_starting(cpu);
378
379         /* Notify boot CPU that we're starting & ready to sync counters */
380         complete(&cpu_starting);
381
382         synchronise_count_slave(cpu);
383
384         /* The CPU is running and counters synchronised, now mark it online */
385         set_cpu_online(cpu, true);
386
387         calculate_cpu_foreign_map();
388
389         /*
390          * Notify boot CPU that we're up & online and it can safely return
391          * from __cpu_up
392          */
393         complete(&cpu_running);
394
395         /*
396          * IRQs will be enabled in ->smp_finish(); enabling them too early
397          * is dangerous.
398          */
399         WARN_ON_ONCE(!irqs_disabled());
400         mp_ops->smp_finish();
401
402         cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
403 }
404
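/*
 * IPI callback used by smp_send_stop(): mark the CPU offline, drop it
 * from cpu_foreign_map and park it with interrupts disabled.
 */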
405 static void stop_this_cpu(void *dummy)
406 {
407         /*
408          * Remove this CPU:
409          */
410
411         set_cpu_online(smp_processor_id(), false);
412         calculate_cpu_foreign_map();
413         local_irq_disable();
414         while (1);
415 }
416
417 void smp_send_stop(void)
418 {
419         smp_call_function(stop_this_cpu, NULL, 0);
420 }
421
422 void __init smp_cpus_done(unsigned int max_cpus)
423 {
424 }
425
426 /* called from main before smp_init() */
427 void __init smp_prepare_cpus(unsigned int max_cpus)
428 {
429         init_new_context(current, &init_mm);
430         current_thread_info()->cpu = 0;
431         mp_ops->prepare_cpus(max_cpus);
432         set_cpu_sibling_map(0);
433         set_cpu_core_map(0);
434         calculate_cpu_foreign_map();
435 #ifndef CONFIG_HOTPLUG_CPU
436         init_cpu_present(cpu_possible_mask);
437 #endif
438         cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
439 }
440
441 /* preload SMP state for boot cpu */
442 void smp_prepare_boot_cpu(void)
443 {
444         if (mp_ops->prepare_boot_cpu)
445                 mp_ops->prepare_boot_cpu();
446         set_cpu_possible(0, true);
447         set_cpu_online(0, true);
448 }
449
450 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
451 {
452         int err;
453
454         err = mp_ops->boot_secondary(cpu, tidle);
455         if (err)
456                 return err;
457
458         /* Wait for CPU to start and be ready to sync counters */
459         if (!wait_for_completion_timeout(&cpu_starting,
460                                          msecs_to_jiffies(1000))) {
461                 pr_crit("CPU%u: failed to start\n", cpu);
462                 return -EIO;
463         }
464
465         synchronise_count_master(cpu);
466
467         /* Wait for CPU to finish startup & mark itself online before return */
468         wait_for_completion(&cpu_running);
469         return 0;
470 }
471
472 #ifdef CONFIG_PROFILING
473 /* Not really SMP stuff ... */
474 int setup_profiling_timer(unsigned int multiplier)
475 {
476         return 0;
477 }
478 #endif
479
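/*
 * Full TLB flush.  flush_tlb_all_ipi() is the cross-CPU callback; with
 * MMID support the ginvt below is globalised by hardware so no IPIs
 * are needed, otherwise every online CPU flushes its own TLB.
 */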
480 static void flush_tlb_all_ipi(void *info)
481 {
482         local_flush_tlb_all();
483 }
484
485 void flush_tlb_all(void)
486 {
487         if (cpu_has_mmid) {
488                 htw_stop();
489                 ginvt_full();
490                 sync_ginv();
491                 instruction_hazard();
492                 htw_start();
493                 return;
494         }
495
496         on_each_cpu(flush_tlb_all_ipi, NULL, 1);
497 }
498
499 static void flush_tlb_mm_ipi(void *mm)
500 {
501         drop_mmu_context((struct mm_struct *)mm);
502 }
503
504 /*
505  * Special variant of smp_call_function() for use by TLB functions:
506  *
507  *  o No return value
508  *  o collapses to normal function call on UP kernels
509  *  o collapses to normal function call on systems with a single shared
510  *    primary cache.
511  */
512 static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
513 {
514         smp_call_function(func, info, 1);
515 }
516
517 static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
518 {
519         preempt_disable();
520
521         smp_on_other_tlbs(func, info);
522         func(info);
523
524         preempt_enable();
525 }
526
527 /*
528  * The following tlb flush calls are invoked when old translations are
529  * being torn down, or pte attributes are changing. For single threaded
530  * address spaces, a new context is obtained on the current cpu, and tlb
531  * contexts on other cpus are invalidated to force a new context allocation
532  * at switch_mm time, should the mm ever be used on other cpus. For
533  * multithreaded address spaces, inter-CPU interrupts have to be sent.
534  * Another case where inter-CPU interrupts are required is when the target
535  * mm might be active on another cpu (e.g. debuggers doing the flushes on
536  * behalf of debuggees, kswapd stealing pages from another process, etc.).
537  * Kanoj 07/00.
538  */
539
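/*
 * Three cases below: with MMIDs the ginvt issued by drop_mmu_context()
 * is globalised, so other CPUs need no attention; if the mm may be
 * live on another CPU, IPI the others to drop their context; otherwise
 * just mark any stale remote contexts invalid and drop the local one.
 */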
540 void flush_tlb_mm(struct mm_struct *mm)
541 {
542         if (!mm)
543                 return;
544
545         if (atomic_read(&mm->mm_users) == 0)
546                 return;         /* happens as a result of exit_mmap() */
547
548         preempt_disable();
549
550         if (cpu_has_mmid) {
551                 /*
552                  * No need to worry about other CPUs - the ginvt in
553                  * drop_mmu_context() will be globalized.
554                  */
555         } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
556                 smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
557         } else {
558                 unsigned int cpu;
559
560                 for_each_online_cpu(cpu) {
561                         if (cpu != smp_processor_id() && cpu_context(cpu, mm))
562                                 set_cpu_context(cpu, mm, 0);
563                 }
564         }
565         drop_mmu_context(mm);
566
567         preempt_enable();
568 }
569
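/* Argument block passed to the TLB flush IPI callbacks below. */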
570 struct flush_tlb_data {
571         struct vm_area_struct *vma;
572         unsigned long addr1;
573         unsigned long addr2;
574 };
575
576 static void flush_tlb_range_ipi(void *info)
577 {
578         struct flush_tlb_data *fd = info;
579
580         local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
581 }
582
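/*
 * Range flush.  The MMID path walks the range with ginvt at a two-page
 * stride, since each TLB entry maps an even/odd page pair; the other
 * paths mirror flush_tlb_mm(), except that for a non-executable VMA
 * the remote contexts are marked stale with a non-zero value so the mm
 * does not look completely unused to has_valid_asid().
 */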
583 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
584 {
585         struct mm_struct *mm = vma->vm_mm;
586         unsigned long addr;
587         u32 old_mmid;
588
589         preempt_disable();
590         if (cpu_has_mmid) {
591                 htw_stop();
592                 old_mmid = read_c0_memorymapid();
593                 write_c0_memorymapid(cpu_asid(0, mm));
594                 mtc0_tlbw_hazard();
595                 addr = round_down(start, PAGE_SIZE * 2);
596                 end = round_up(end, PAGE_SIZE * 2);
597                 do {
598                         ginvt_va_mmid(addr);
599                         sync_ginv();
600                         addr += PAGE_SIZE * 2;
601                 } while (addr < end);
602                 write_c0_memorymapid(old_mmid);
603                 instruction_hazard();
604                 htw_start();
605         } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
606                 struct flush_tlb_data fd = {
607                         .vma = vma,
608                         .addr1 = start,
609                         .addr2 = end,
610                 };
611
612                 smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
613                 local_flush_tlb_range(vma, start, end);
614         } else {
615                 unsigned int cpu;
616                 int exec = vma->vm_flags & VM_EXEC;
617
618                 for_each_online_cpu(cpu) {
619                         /*
620                          * flush_cache_range() will only fully flush icache if
621                          * the VMA is executable; otherwise we must invalidate the
622                          * ASID without making it appear to has_valid_asid() as if
623                          * the mm has been completely unused by that CPU.
624                          */
625                         if (cpu != smp_processor_id() && cpu_context(cpu, mm))
626                                 set_cpu_context(cpu, mm, !exec);
627                 }
628                 local_flush_tlb_range(vma, start, end);
629         }
630         preempt_enable();
631 }
632
633 static void flush_tlb_kernel_range_ipi(void *info)
634 {
635         struct flush_tlb_data *fd = info;
636
637         local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
638 }
639
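/*
 * Kernel mappings are shared by all CPUs, so kernel-range flushes are
 * always broadcast to every online CPU.
 */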
640 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
641 {
642         struct flush_tlb_data fd = {
643                 .addr1 = start,
644                 .addr2 = end,
645         };
646
647         on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
648 }
649
650 static void flush_tlb_page_ipi(void *info)
651 {
652         struct flush_tlb_data *fd = info;
653
654         local_flush_tlb_page(fd->vma, fd->addr1);
655 }
656
657 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
658 {
659         u32 old_mmid;
660
661         preempt_disable();
662         if (cpu_has_mmid) {
663                 htw_stop();
664                 old_mmid = read_c0_memorymapid();
665                 write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
666                 mtc0_tlbw_hazard();
667                 ginvt_va_mmid(page);
668                 sync_ginv();
669                 write_c0_memorymapid(old_mmid);
670                 instruction_hazard();
671                 htw_start();
672         } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
673                    (current->mm != vma->vm_mm)) {
674                 struct flush_tlb_data fd = {
675                         .vma = vma,
676                         .addr1 = page,
677                 };
678
679                 smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
680                 local_flush_tlb_page(vma, page);
681         } else {
682                 unsigned int cpu;
683
684                 for_each_online_cpu(cpu) {
685                         /*
686                          * flush_cache_page() only does partial flushes, so
687                          * invalidate the ASID without making it appear to
688                          * has_valid_asid() as if the mm has been completely unused
689                          * by that CPU.
690                          */
691                         if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
692                                 set_cpu_context(cpu, vma->vm_mm, 1);
693                 }
694                 local_flush_tlb_page(vma, page);
695         }
696         preempt_enable();
697 }
698
699 static void flush_tlb_one_ipi(void *info)
700 {
701         unsigned long vaddr = (unsigned long) info;
702
703         local_flush_tlb_one(vaddr);
704 }
705
706 void flush_tlb_one(unsigned long vaddr)
707 {
708         smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
709 }
710
711 EXPORT_SYMBOL(flush_tlb_page);
712 EXPORT_SYMBOL(flush_tlb_one);
713
714 #ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
715 void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
716 {
717         if (mp_ops->cleanup_dead_cpu)
718                 mp_ops->cleanup_dead_cpu(cpu);
719 }
720 #endif
721
722 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
723
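/*
 * Tick broadcast: deliver a pseudo tick to each CPU in @mask via a
 * pre-initialised per-CPU call_single_data, so the sender never
 * blocks.
 */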
724 static void tick_broadcast_callee(void *info)
725 {
726         tick_receive_broadcast();
727 }
728
729 static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd) =
730         CSD_INIT(tick_broadcast_callee, NULL);
731
732 void tick_broadcast(const struct cpumask *mask)
733 {
734         call_single_data_t *csd;
735         int cpu;
736
737         for_each_cpu(cpu, mask) {
738                 csd = &per_cpu(tick_broadcast_csd, cpu);
739                 smp_call_function_single_async(cpu, csd);
740         }
741 }
742
743 #endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */