// SPDX-License-Identifier: GPL-2.0
/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/memblock.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/topology.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 uninitialized_var(status);

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}

#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)

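/*
 * The frame offsets leave room at the top of each stack for an empty
 * register save area (STACK_FRAME_OVERHEAD) plus a struct pt_regs
 * (__PT_SIZE), so the entry code can always store a complete frame at
 * the initial stack pointer.
 */
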
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, panic_stack;
	struct lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore =	(struct lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !panic_stack || !async_stack)
			goto out;
	} else {
		async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
		panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
	}
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	if (nmi_alloc_per_cpu(lc))
		goto out;
	if (vdso_alloc_per_cpu(lc))
		goto out_mcesa;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;

out_mcesa:
	nmi_free_per_cpu(lc);
out:
	if (pcpu != &pcpu_devices[0]) {
		free_page(panic_stack);
		free_pages(async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}

#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	vdso_free_per_cpu(pcpu->lowcore);
	nmi_free_per_cpu(pcpu->lowcore);
	if (pcpu == &pcpu_devices[0])
		return;
	free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
	free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
	free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}

#endif /* CONFIG_HOTPLUG_CPU */

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc = pcpu->lowcore;

	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->user_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	lc->cregs_save_area[1] = lc->kernel_asce;
	lc->cregs_save_area[7] = lc->vdso_asce;
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(lc->stfle_fac_list));
	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
	       sizeof(lc->alt_stfle_fac_list));
	arch_spin_lock_setup(cpu);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long) tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = tsk->thread.user_timer;
	lc->guest_timer = tsk->thread.guest_timer;
	lc->system_timer = tsk->thread.system_timer;
	lc->hardirq_timer = tsk->thread.hardirq_timer;
	lc->softirq_timer = tsk->thread.softirq_timer;
	lc->steal_timer = 0;
}

static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS);
	if (pcpu->address == source_cpu)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

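/*
 * Note on mem_assign_absolute() above: accesses to the lowcore pages
 * are subject to prefixing, while the sigp restart handler fetches its
 * parameters from the target cpu's absolute lowcore. The restart
 * fields therefore have to be written to the absolute locations, not
 * through the calling cpu's prefixed view of page zero.
 */
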
/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc == SIGP_CC_ORDER_CODE_ACCEPTED) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}

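/*
 * Example of the resulting numbering: with mtid = 1 (two threads per
 * core) smp_cpu_mt_shift becomes 1, so core n owns the physical cpu
 * addresses (n << 1) and (n << 1) + 1.
 */
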
/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu_devices->lowcore;

	if (pcpu_devices[0].address == stap())
		lc = &S390_lowcore;

	pcpu_delegate(&pcpu_devices[0], func, data,
		      lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

bool notrace arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(pcpu_devices + cpu))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

void notrace smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C) {
		diag_stat_inc_norecursion(DIAG_STAT_X09C);
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	} else if (MACHINE_HAS_DIAG44) {
		diag_stat_inc_norecursion(DIAG_STAT_X044);
		asm volatile("diag 0,0,0x44");
	}
}

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void notrace smp_emergency_stop(void)
{
	cpumask_t cpumask;
	u64 end;
	int cpu;

	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, &cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			break;
		cpu_relax();
	}
}
NOKPROBE_SYMBOL(smp_emergency_stop);

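/*
 * The timeout above is one second: TOD clock bit 51 ticks once per
 * microsecond, so one microsecond is 1UL << 12 TOD units and
 * 1000000UL << 12 is 1s. smp_emergency_stop() is notrace and marked
 * NOKPROBE because it runs on the panic path, where tracing and
 * kprobes must not fire.
 */
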
/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();

	if (oops_in_progress)
		smp_emergency_stop();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu_devices + cpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

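/*
 * The callback applies "new = (old & andval) | orval" on every cpu, so
 * setting a bit uses andval = -1UL (keep everything) plus the bit in
 * orval, while clearing uses orval = 0 plus the bit removed from
 * andval. For example, smp_ctl_set_bit(0, 10) turns on bit 10 (counted
 * from the least significant bit) of control register 0 on all cpus.
 */
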
#ifdef CONFIG_CRASH_DUMP

int smp_store_status(int cpu)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	unsigned long pa;

	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
		return 0;
	pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
	if (MACHINE_HAS_GS)
		pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp dump
 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	__vector128 *vxrs = (__vector128 *) page;

	if (is_boot_cpu)
		vxrs = boot_cpu_vector_save_area;
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
	save_area_add_vxrs(sa, vxrs);
}

static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	void *regs = (void *) page;

	if (is_boot_cpu)
		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
	save_area_add_regs(sa, regs);
}

void __init smp_save_dump_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	unsigned long page;
	bool is_boot_cpu;

	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
		/* No previous system present, normal boot. */
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		is_boot_cpu = (addr == boot_cpu_addr);
		/* Allocate save area */
		sa = save_area_alloc(is_boot_cpu);
		if (!sa)
			panic("could not allocate memory for save area\n");
		if (MACHINE_HAS_VX)
			/* Get the vector registers */
			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
		/*
		 * For a zfcp dump OLDMEM_BASE == NULL and the registers
		 * of the boot CPU are stored in the HSA. To retrieve
		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
		 */
		if (!is_boot_cpu || OLDMEM_BASE)
			/* Get the CPU registers */
			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
	}
	memblock_free(page, PAGE_SIZE);
	diag308_reset();
	pcpu_set_smt(0);
}

#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
}

static int smp_add_present_cpu(int cpu);

static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
			bool configured, bool early)
{
	struct pcpu *pcpu;
	int cpu, nr, i;
	u16 address;

	nr = 0;
	if (sclp.has_core_type && core->type != boot_core_type)
		return nr;
	cpu = cpumask_first(avail);
	address = core->core_id << smp_cpu_mt_shift;
	for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
		if (pcpu_find_address(cpu_present_mask, address + i))
			continue;
		pcpu = pcpu_devices + cpu;
		pcpu->address = address + i;
		if (configured)
			pcpu->state = CPU_STATE_CONFIGURED;
		else
			pcpu->state = CPU_STATE_STANDBY;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		set_cpu_present(cpu, true);
		if (!early && smp_add_present_cpu(cpu) != 0)
			set_cpu_present(cpu, false);
		else
			nr++;
		cpumask_clear_cpu(cpu, avail);
		cpu = cpumask_next(cpu, avail);
	}
	return nr;
}

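/*
 * With smp_cpu_mtid = 1, for example, a core entry with core_id = 3
 * covers the physical cpu addresses 6 and 7, and smp_add_core() maps
 * both threads to consecutive logical cpu numbers taken from *avail.
 */
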
static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
	struct sclp_core_entry *core;
	static cpumask_t avail;
	bool configured;
	u16 core_id;
	int nr, i;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	/*
	 * Add IPL core first (which got logical CPU number 0) to make sure
	 * that all SMT threads get subsequent logical CPU numbers.
	 */
	if (early) {
		core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
		for (i = 0; i < info->configured; i++) {
			core = &info->core[i];
			if (core->core_id == core_id) {
				nr += smp_add_core(core, &avail, true, early);
				break;
			}
		}
	}
	for (i = 0; i < info->combined; i++) {
		configured = i < info->configured;
		nr += smp_add_core(&info->core[i], &avail, configured, early);
	}
	return nr;
}

void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_virt_alloc(sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	get_online_cpus();
	__smp_rescan_cpus(info, true);
	put_online_cpus();
	memblock_free_early((unsigned long)info, sizeof(*info));
}

/*
 *	Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	int cpu = raw_smp_processor_id();

	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	set_cpu_flag(CIF_ASCE_PRIMARY);
	set_cpu_flag(CIF_ASCE_SECONDARY);
	cpu_init();
	rcu_cpu_starting(cpu);
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(cpu);
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(cpu, true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

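/*
 * smp_start_secondary() is not called directly: pcpu_start_fn() stores
 * it as the restart function in the new cpu's lowcore and delivers a
 * sigp restart, so this is the first C code the cpu executes, running
 * on the kernel stack selected by pcpu_start_fn().
 */
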
/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	int rc;

	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

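/*
 * Worked example: with sclp.max_cores = 8, two threads per core and no
 * smt= or possible_cpus= limit, sclp_max is 8 * 2 = 16 and logical
 * cpus 0-15 become possible. A possible_cpus= value on the command
 * line can only lower that bound; the result is always capped by
 * nr_cpu_ids.
 */
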
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	pcpu_devices[0].address = stap();
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
	S390_lowcore.spinlock_index = 0;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);

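/*
 * Usage sketch: writing 0 to /sys/devices/system/cpu/cpuN/configure
 * moves an offline core to the standby pool via SCLP, and writing 1
 * configures it again. All threads of a core change state together,
 * which is why the loops above walk cpu + i up to smp_cpu_mtid.
 */
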
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_online(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}

static int smp_cpu_pre_down(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return 0;
}

static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, false);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = lock_device_hotplug_sysfs();
	if (rc)
		return rc;
	rc = smp_rescan_cpus();
	unlock_device_hotplug();
	return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);

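/*
 * Usage sketch: a privileged "echo 1 > /sys/devices/system/cpu/rescan"
 * invokes rescan_store(), which re-reads the core list from SCLP and
 * adds any cpus that appeared since boot. The written value itself is
 * ignored; the write merely triggers the rescan.
 */
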
#endif /* CONFIG_HOTPLUG_CPU */

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
out:
	return rc;
}
subsys_initcall(s390_smp_init);