1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pseries CPU Hotplug infrastructure.
 *
 * Split out from arch/powerpc/platforms/pseries/setup.c
 * arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 * Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 * Plus various changes from other IBM teams...
 *
 * Copyright (C) 2006 Michael Ellerman, IBM Corporation
 */
17 #define pr_fmt(fmt) "pseries-hotplug-cpu: " fmt
19 #include <linux/kernel.h>
20 #include <linux/interrupt.h>
21 #include <linux/delay.h>
22 #include <linux/sched.h> /* for idle_task_exit */
23 #include <linux/sched/hotplug.h>
24 #include <linux/cpu.h>
26 #include <linux/slab.h>
29 #include <asm/firmware.h>
30 #include <asm/machdep.h>
31 #include <asm/vdso_datapage.h>
34 #include <asm/plpar_wrappers.h>
35 #include <asm/topology.h>
39 /* This version can't take the spinlock, because it never returns */
40 static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;

/*
 * Record the CPU ids used on each nodes.
 * Protected by cpu_add_remove_lock.
 *
 * Consulted by find_cpu_id_range() so DLPAR CPU-add prefers reusing CPU
 * ids on the node they were first handed out on.
 */
46 static cpumask_var_t node_recorded_ids_map[MAX_NUMNODES];
/*
 * Stop the calling CPU permanently via the RTAS "stop-self" call.
 * Must use the unlocked RTAS entry point because this call never
 * returns, so the regular RTAS lock could never be released (see the
 * comment at rtas_stop_self_token).  Panics if RTAS ever does return.
 *
 * NOTE(review): braces and the rtas_args setup are not visible in this
 * extraction — lines appear elided.
 */
48 static void rtas_stop_self(void)
50 static struct rtas_args args;

54 BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);

/* 0 input args, 1 output arg, per the call site below */
56 rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);

58 panic("Alas, I survived.\n");
/*
 * Runs on the CPU that is going offline: tear down this hardware
 * thread's firmware state (SLB shadow) before it is stopped.
 *
 * NOTE(review): lines appear elided here — the call that actually stops
 * the CPU (presumably rtas_stop_self()) is not visible in this chunk.
 */
61 static void pseries_cpu_offline_self(void)
63 unsigned int hwcpu = hard_smp_processor_id();

72 unregister_slb_shadow(hwcpu);

75 /* Should never get here... */
/*
 * smp_ops->cpu_disable hook: mark the current CPU offline, fix up
 * boot_cpuid if it pointed at this CPU, migrate interrupts away
 * (XIVE or XICS paths both visible below), and drop this CPU's MMU
 * context state.
 */
80 static int pseries_cpu_disable(void)
82 int cpu = smp_processor_id();

84 set_cpu_online(cpu, false);
/* Keep the userspace-visible processor count in the vDSO in sync */
85 vdso_data->processorCount--;

87 /*fix boot_cpuid here*/
88 if (cpu == boot_cpuid)
89 boot_cpuid = cpumask_any(cpu_online_mask);

91 /* FIXME: abstract this to not be platform specific later on */
/* NOTE(review): the conditional selecting XIVE vs XICS is not visible here */
93 xive_smp_disable_cpu();
95 xics_migrate_irqs_away();

97 cleanup_cpu_mmu_context();
/*
 * pseries_cpu_die: Wait for the cpu to die.
 * @cpu: logical processor id of the CPU whose death we're awaiting.
 *
 * This function is called from the context of the thread which is performing
 * the cpu-offline. Here we wait for long enough to allow the cpu in question
 * to self-destroy so that the cpu-offline thread can send the CPU_DEAD
 * notifications.
 *
 * OTOH, pseries_cpu_offline_self() is called by the @cpu when it wants to
 * self-destroy.
 */
114 static void pseries_cpu_die(unsigned int cpu)
117 unsigned int pcpu = get_hard_smp_processor_id(cpu);
/* Warn after 120s, then re-arm the timeout and keep polling */
118 unsigned long timeout = jiffies + msecs_to_jiffies(120000);

/* NOTE(review): the polling loop's braces and the cpu_status declaration
 * are not visible in this extraction. */
121 cpu_status = smp_query_cpu_stopped(pcpu);
122 if (cpu_status == QCSS_STOPPED ||
123 cpu_status == QCSS_HARDWARE_ERROR)

126 if (time_after(jiffies, timeout)) {
127 pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n",
129 timeout = jiffies + msecs_to_jiffies(120000);

135 if (cpu_status == QCSS_HARDWARE_ERROR) {
136 pr_warn("CPU %i (hwid %i) reported error while dying\n",

140 /* Isolation and deallocation are definitely done by
141 * drslot_chrp_cpu. If they were not they would be
142 * done here. Change isolate state to Isolate and
143 * change allocation-state to Unusable.
 */
/* Let secondary-start code treat this CPU as not yet started */
145 paca_ptrs[cpu]->cpu_start = 0;
/*
 * find_cpu_id_range - find a linear range of @nthreads free CPU ids.
 * @nthreads : the number of threads (cpu ids)
 * @assigned_node : the node it belongs to or NUMA_NO_NODE if free ids from any
 *                  node can be used.
 * @cpu_mask: the returned CPU mask.
 *
 * Returns 0 on success.
 */
157 static int find_cpu_id_range(unsigned int nthreads, int assigned_node,
158 cpumask_var_t *cpu_mask)
160 cpumask_var_t candidate_mask;
161 unsigned int cpu, node;

164 if (!zalloc_cpumask_var(&candidate_mask, GFP_KERNEL))

/* Seed *cpu_mask with ids 0..nthreads-1; it is slid left below */
167 cpumask_clear(*cpu_mask);
168 for (cpu = 0; cpu < nthreads; cpu++)
169 cpumask_set_cpu(cpu, *cpu_mask);

171 BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));

173 /* Get a bitmap of unoccupied slots. */
174 cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);

176 if (assigned_node != NUMA_NO_NODE) {
/*
 * Remove free ids previously assigned on the other nodes. We
 * can walk only online nodes because once a node became online
 * it is not turned offlined back.
 */
182 for_each_online_node(node) {
183 if (node == assigned_node)
185 cpumask_andnot(candidate_mask, candidate_mask,
186 node_recorded_ids_map[node]);

190 if (cpumask_empty(candidate_mask))

/* Slide the nthreads-wide window up the id space until it fits
 * entirely inside candidate_mask (or runs off the end). */
193 while (!cpumask_empty(*cpu_mask)) {
194 if (cpumask_subset(*cpu_mask, candidate_mask))
195 /* Found a range where we can insert the new cpu(s) */
197 cpumask_shift_left(*cpu_mask, *cpu_mask, nthreads);

200 if (!cpumask_empty(*cpu_mask))

204 free_cpumask_var(candidate_mask);
/*
 * Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle
 * here is that a cpu device node may represent multiple logical cpus
 * in the SMT case. We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
 */
215 static int pseries_add_processor(struct device_node *np)
217 int len, nthreads, node, cpu, assigned_node;
219 cpumask_var_t cpu_mask;
220 const __be32 *intserv;

/* One u32 per hardware thread of this cpu node */
222 intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);

226 nthreads = len / sizeof(u32);

228 if (!alloc_cpumask_var(&cpu_mask, GFP_KERNEL))

/*
 * Fetch from the DT nodes read by dlpar_configure_connector() the NUMA
 * node id the added CPU belongs to.
 */
235 node = of_node_to_nid(np);
236 if (node < 0 || !node_possible(node))
237 node = first_online_node;

239 BUG_ON(node == NUMA_NO_NODE);
240 assigned_node = node;

242 cpu_maps_update_begin();

244 rc = find_cpu_id_range(nthreads, node, &cpu_mask);
245 if (rc && nr_node_ids > 1) {
/*
 * Try again, considering the free CPU ids from the other node.
 */
250 rc = find_cpu_id_range(nthreads, NUMA_NO_NODE, &cpu_mask);

254 pr_err("Cannot add cpu %pOF; this system configuration"
255 " supports %d logical cpus.\n", np, num_possible_cpus());

/* Mark each found id present and bind it to its hardware thread id */
259 for_each_cpu(cpu, cpu_mask) {
260 BUG_ON(cpu_present(cpu));
261 set_cpu_present(cpu, true);
262 set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));

265 /* Record the newly used CPU ids for the associate node. */
266 cpumask_or(node_recorded_ids_map[assigned_node],
267 node_recorded_ids_map[assigned_node], cpu_mask);

/*
 * If node is set to NUMA_NO_NODE, CPU ids have be reused from
 * another node, remove them from its mask.
 */
273 if (node == NUMA_NO_NODE) {
274 cpu = cpumask_first(cpu_mask);
275 pr_warn("Reusing free CPU ids %d-%d from another node\n",
276 cpu, cpu + nthreads - 1);
277 for_each_online_node(node) {
278 if (node == assigned_node)
280 cpumask_andnot(node_recorded_ids_map[node],
281 node_recorded_ids_map[node],

287 cpu_maps_update_done();
288 free_cpumask_var(cpu_mask);
/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
 */
297 static void pseries_remove_processor(struct device_node *np)
300 int len, nthreads, i;
301 const __be32 *intserv;

304 intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);

308 nthreads = len / sizeof(u32);

310 cpu_maps_update_begin();
/* For each hardware thread, find its logical cpu and un-present it */
311 for (i = 0; i < nthreads; i++) {
312 thread = be32_to_cpu(intserv[i]);
313 for_each_present_cpu(cpu) {
314 if (get_hard_smp_processor_id(cpu) != thread)
316 BUG_ON(cpu_online(cpu));
317 set_cpu_present(cpu, false);
318 set_hard_smp_processor_id(cpu, -1);
319 update_numa_cpu_lookup_table(cpu, -1);

/* Loop ran off the end without a match */
322 if (cpu >= nr_cpu_ids)
323 printk(KERN_WARNING "Could not find cpu to remove "
324 "with physical id 0x%x\n", thread);

326 cpu_maps_update_done();
/*
 * Offline every logical cpu (SMT thread) represented by device node
 * @dn.  Walks the "ibm,ppc-interrupt-server#s" thread list, matches
 * each thread id against the present cpus, and calls device_offline()
 * on each — dropping cpu_add_remove_lock around the offline call since
 * device_offline() takes it itself.
 */
329 static int dlpar_offline_cpu(struct device_node *dn)
333 int len, nthreads, i;
334 const __be32 *intserv;

337 intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);

341 nthreads = len / sizeof(u32);

343 cpu_maps_update_begin();
344 for (i = 0; i < nthreads; i++) {
345 thread = be32_to_cpu(intserv[i]);
346 for_each_present_cpu(cpu) {
347 if (get_hard_smp_processor_id(cpu) != thread)
350 if (!cpu_online(cpu))

/*
 * device_offline() will return -EBUSY (via cpu_down()) if there
 * is only one CPU left. Check it here to fail earlier and with a
 * more informative error message, while also retaining the
 * cpu_add_remove_lock to be sure that no CPUs are being
 * online/offlined during this check.
 */
360 if (num_online_cpus() == 1) {
361 pr_warn("Unable to remove last online CPU %pOFn\n", dn);

366 cpu_maps_update_done();
367 rc = device_offline(get_cpu_device(cpu));
370 cpu_maps_update_begin();

/* Inner loop exhausted all possible cpu ids without a match */
373 if (cpu == num_possible_cpus()) {
374 pr_warn("Could not find cpu to offline with physical id 0x%x\n",

379 cpu_maps_update_done();
/*
 * Online every logical cpu (SMT thread) represented by device node
 * @dn — the inverse of dlpar_offline_cpu().  On a device_online()
 * failure the node's threads are rolled back via dlpar_offline_cpu().
 */
385 static int dlpar_online_cpu(struct device_node *dn)
389 int len, nthreads, i;
390 const __be32 *intserv;

393 intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);

397 nthreads = len / sizeof(u32);

399 cpu_maps_update_begin();
400 for (i = 0; i < nthreads; i++) {
401 thread = be32_to_cpu(intserv[i]);
402 for_each_present_cpu(cpu) {
403 if (get_hard_smp_processor_id(cpu) != thread)
/* Drop cpu_add_remove_lock: device_online() re-takes it */
405 cpu_maps_update_done();
/* Associate the cpu with its NUMA node before onlining */
406 find_and_online_cpu_nid(cpu);
407 rc = device_online(get_cpu_device(cpu));
/* Roll back this node's already-onlined threads on failure */
409 dlpar_offline_cpu(dn);
412 cpu_maps_update_begin();

/* Inner loop exhausted all possible cpu ids without a match */
416 if (cpu == num_possible_cpus())
417 printk(KERN_WARNING "Could not find cpu to online "
418 "with physical id 0x%x\n", thread);

420 cpu_maps_update_done();
/*
 * Return true if a child cpu node of @parent already carries
 * "ibm,my-drc-index" == @drc_index, i.e. the CPU is already in the
 * device tree.
 */
427 static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index)
429 struct device_node *child = NULL;

434 /* Assume cpu doesn't exist */

437 for_each_child_of_node(parent, child) {
438 rc = of_property_read_u32(child, "ibm,my-drc-index",

443 if (my_drc_index == drc_index) {
/*
 * Validate @drc_index against the compressed "ibm,drc-info" property:
 * walk each drc-info record of type "CPU" and check whether drc_index
 * lands on one of the record's sequential index values
 * (drc_index_start + k * sequential_inc).
 */
453 static bool drc_info_valid_index(struct device_node *parent, u32 drc_index)
455 struct property *info;
456 struct of_drc_info drc;

461 info = of_find_property(parent, "ibm,drc-info", NULL);

465 value = of_prop_next_u32(info, NULL, &count);

467 /* First value of ibm,drc-info is number of drc-info records */

473 for (i = 0; i < count; i++) {
474 if (of_read_drc_info_cell(&info, &value, &drc))

/* Only CPU-type records are of interest here */
477 if (strncmp(drc.drc_type, "CPU", 3))

/* Records are ordered; past this record's range means not found */
480 if (drc_index > drc.last_drc_index)

483 index = drc.drc_index_start;
484 for (j = 0; j < drc.num_sequential_elems; j++) {
485 if (drc_index == index)

488 index += drc.sequential_inc;
/*
 * Check that @drc_index names a CPU slot the platform knows about,
 * using whichever DRC property the firmware provides: the compressed
 * "ibm,drc-info" form, or the flat "ibm,drc-indexes" array.
 */
495 static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)

500 if (of_find_property(parent, "ibm,drc-info", NULL))
501 return drc_info_valid_index(parent, drc_index);

/* Note that the format of the ibm,drc-indexes array is
 * the number of entries in the array followed by the array
 * of drc values so we start looking at index = 1.
 */

511 rc = of_property_read_u32_index(parent, "ibm,drc-indexes",

517 if (drc == drc_index)
/*
 * Hot-add the CPU identified by @drc_index: validate the index,
 * acquire the DRC from the hypervisor, build its device-tree node via
 * configure-connector, attach it, and online its threads.  Each step
 * unwinds the previous ones on failure.
 */
524 static ssize_t dlpar_cpu_add(u32 drc_index)
526 struct device_node *dn, *parent;

529 pr_debug("Attempting to add CPU, drc index: %x\n", drc_index);

531 parent = of_find_node_by_path("/cpus");
533 pr_warn("Failed to find CPU root node \"/cpus\"\n");

/* Refuse to add a CPU that is already in the device tree */
537 if (dlpar_cpu_exists(parent, drc_index)) {
539 pr_warn("CPU with drc index %x already exists\n", drc_index);

543 if (!valid_cpu_drc_index(parent, drc_index)) {
545 pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index);

549 rc = dlpar_acquire_drc(drc_index);
551 pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",

557 dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
559 pr_warn("Failed call to configure-connector, drc index: %x\n",
/* Unwind: give the DRC back if we could not build the node */
561 dlpar_release_drc(drc_index);

566 rc = dlpar_attach_node(dn, parent);

568 /* Regardless we are done with parent now */

573 pr_warn("Failed to attach node %pOFn, rc: %d, drc index: %x\n",

576 rc = dlpar_release_drc(drc_index);
578 dlpar_free_cc_nodes(dn);

583 update_numa_distance(dn);

585 rc = dlpar_online_cpu(dn);
588 pr_warn("Failed to online cpu %pOFn, rc: %d, drc index: %x\n",
/* Unwind: detach the node and release the DRC */
591 rc = dlpar_detach_node(dn);
593 dlpar_release_drc(drc_index);

598 pr_debug("Successfully added CPU %pOFn, drc index: %x\n", dn,
/*
 * Hot-remove the CPU behind device node @dn / @drc_index: offline its
 * threads, release the DRC to the hypervisor, then detach the node.
 * Failures re-online / re-acquire to restore the previous state.
 */
603 static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)

607 pr_debug("Attempting to remove CPU %pOFn, drc index: %x\n",

610 rc = dlpar_offline_cpu(dn);
612 pr_warn("Failed to offline CPU %pOFn, rc: %d\n", dn, rc);

616 rc = dlpar_release_drc(drc_index);
618 pr_warn("Failed to release drc (%x) for CPU %pOFn, rc: %d\n",
/* Unwind: bring the threads back online */
620 dlpar_online_cpu(dn);

624 rc = dlpar_detach_node(dn);
628 pr_warn("Failed to detach CPU %pOFn, rc: %d", dn, rc);

/* Unwind: re-acquire the DRC and re-online the threads */
630 rc = dlpar_acquire_drc(drc_index);
632 dlpar_online_cpu(dn);

637 pr_debug("Successfully removed CPU, drc index: %x\n", drc_index);
/*
 * Find the "cpu"-type device node whose "ibm,my-drc-index" equals
 * @drc_index.  The iterator's reference on the returned node is kept
 * for the caller.
 */
641 static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
643 struct device_node *dn;

647 for_each_node_by_type(dn, "cpu") {
648 rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index);

652 if (my_index == drc_index)
/*
 * Remove a CPU given only its DRC index: look up its device node and
 * delegate to dlpar_cpu_remove().
 */
659 static int dlpar_cpu_remove_by_index(u32 drc_index)
661 struct device_node *dn;

664 dn = cpu_drc_index_to_dn(drc_index);
666 pr_warn("Cannot find CPU (drc index %x) to remove\n",

671 rc = dlpar_cpu_remove(dn, drc_index);
/*
 * Collect the drc-indexes of up to @cpus_to_remove CPUs into
 * @cpu_drcs.  Deliberately counts one extra CPU so the callers can
 * tell "exactly enough" (which would remove the last CPU) from
 * "more than enough".
 */
676 static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove)
678 struct device_node *dn;

/* We want to find cpus_to_remove + 1 CPUs to ensure we do not
 * remove the last CPU.
 */
685 for_each_node_by_type(dn, "cpu") {

688 if (cpus_found > cpus_to_remove) {

/* Note that cpus_found is always 1 ahead of the index
 * into the cpu_drcs array, so we use cpus_found - 1
 */
696 rc = of_property_read_u32(dn, "ibm,my-drc-index",
697 &cpu_drcs[cpus_found - 1]);
699 pr_warn("Error occurred getting drc-index for %pOFn\n",

706 if (cpus_found < cpus_to_remove) {
707 pr_warn("Failed to find enough CPUs (%d of %d) to remove\n",
708 cpus_found, cpus_to_remove);
709 } else if (cpus_found == cpus_to_remove) {
/* Exactly cpus_to_remove present means we'd remove them all */
710 pr_warn("Cannot remove all CPUs\n");
/*
 * Hot-remove @cpus_to_remove CPUs chosen by find_dlpar_cpus_to_remove().
 * If any individual removal fails, every CPU removed so far is added
 * back, making the operation all-or-nothing.
 */
716 static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
720 int cpus_removed = 0;

723 pr_debug("Attempting to hot-remove %d CPUs\n", cpus_to_remove);

725 cpu_drcs = kcalloc(cpus_to_remove, sizeof(*cpu_drcs), GFP_KERNEL);

729 cpus_found = find_dlpar_cpus_to_remove(cpu_drcs, cpus_to_remove);
/* Need strictly more than cpus_to_remove so one CPU survives */
730 if (cpus_found <= cpus_to_remove) {

735 for (i = 0; i < cpus_to_remove; i++) {
736 rc = dlpar_cpu_remove_by_index(cpu_drcs[i]);

743 if (cpus_removed != cpus_to_remove) {
744 pr_warn("CPU hot-remove failed, adding back removed CPUs\n");

746 for (i = 0; i < cpus_removed; i++)
747 dlpar_cpu_add(cpu_drcs[i]);
/*
 * Using the compressed "ibm,drc-info" property @info, collect up to
 * @cpus_to_add drc-indexes of CPU slots not already present under
 * @cpus, storing them in @cpu_drcs.  Returns the number found.
 */
758 static int find_drc_info_cpus_to_add(struct device_node *cpus,
759 struct property *info,
760 u32 *cpu_drcs, u32 cpus_to_add)
762 struct of_drc_info drc;
764 u32 count, drc_index;

/* First u32 of ibm,drc-info is the number of records that follow */
771 value = of_prop_next_u32(info, NULL, &count);

775 for (i = 0; i < count; i++) {
776 of_read_drc_info_cell(&info, &value, &drc);
/* Only CPU-type records are candidates */
777 if (strncmp(drc.drc_type, "CPU", 3))

780 drc_index = drc.drc_index_start;
781 for (j = 0; j < drc.num_sequential_elems; j++) {
/* Skip slots whose CPU node already exists */
782 if (dlpar_cpu_exists(cpus, drc_index))

785 cpu_drcs[cpus_found++] = drc_index;

787 if (cpus_found == cpus_to_add)

790 drc_index += drc.sequential_inc;
/*
 * Fallback for platforms without "ibm,drc-info": walk the flat
 * "ibm,drc-indexes" array under @cpus and collect up to @cpus_to_add
 * drc values for CPUs not already present.  Returns the number found.
 */
797 static int find_drc_index_cpus_to_add(struct device_node *cpus,
798 u32 *cpu_drcs, u32 cpus_to_add)

/* Search the ibm,drc-indexes array for possible CPU drcs to
 * add. Note that the format of the ibm,drc-indexes array is
 * the number of entries in the array followed by the array
 * of drc values so we start looking at index = 1.
 */

810 while (cpus_found < cpus_to_add) {
811 rc = of_property_read_u32_index(cpus, "ibm,drc-indexes",
812 index++, &drc_index);

/* Skip slots whose CPU node already exists */
817 if (dlpar_cpu_exists(cpus, drc_index))

820 cpu_drcs[cpus_found++] = drc_index;
/*
 * Hot-add @cpus_to_add CPUs, choosing candidate DRC indexes from
 * whichever DRC property the firmware exposes.  If any individual add
 * fails, every CPU added so far is removed again (all-or-nothing).
 */
826 static int dlpar_cpu_add_by_count(u32 cpus_to_add)
828 struct device_node *parent;
829 struct property *info;

835 pr_debug("Attempting to hot-add %d CPUs\n", cpus_to_add);

837 cpu_drcs = kcalloc(cpus_to_add, sizeof(*cpu_drcs), GFP_KERNEL);

841 parent = of_find_node_by_path("/cpus");
843 pr_warn("Could not find CPU root node in device tree\n");

/* Prefer the compressed ibm,drc-info form when present */
848 info = of_find_property(parent, "ibm,drc-info", NULL);
850 cpus_found = find_drc_info_cpus_to_add(parent, info, cpu_drcs, cpus_to_add);
852 cpus_found = find_drc_index_cpus_to_add(parent, cpu_drcs, cpus_to_add);

856 if (cpus_found < cpus_to_add) {
857 pr_warn("Failed to find enough CPUs (%d of %d) to add\n",
858 cpus_found, cpus_to_add);

863 for (i = 0; i < cpus_to_add; i++) {
864 rc = dlpar_cpu_add(cpu_drcs[i]);

871 if (cpus_added < cpus_to_add) {
872 pr_warn("CPU hot-add failed, removing any added CPUs\n");

874 for (i = 0; i < cpus_added; i++)
875 dlpar_cpu_remove_by_index(cpu_drcs[i]);
/*
 * Entry point for CPU DLPAR requests from the hotplug error log:
 * dispatch on action (add/remove) and id type (count vs. single drc
 * index), under the device-hotplug lock.
 */
886 int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
888 u32 count, drc_index;

891 count = hp_elog->_drc_u.drc_count;
892 drc_index = hp_elog->_drc_u.drc_index;

894 lock_device_hotplug();

896 switch (hp_elog->action) {
897 case PSERIES_HP_ELOG_ACTION_REMOVE:
898 if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
899 rc = dlpar_cpu_remove_by_count(count);
900 else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
901 rc = dlpar_cpu_remove_by_index(drc_index);
/*
 * Setting the isolation state of an UNISOLATED/CONFIGURED
 * device to UNISOLATE is a no-op, but the hypervisor can
 * use it as a hint that the CPU removal failed.
 */
908 dlpar_unisolate_drc(drc_index);

913 case PSERIES_HP_ELOG_ACTION_ADD:
914 if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
915 rc = dlpar_cpu_add_by_count(count);
916 else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
917 rc = dlpar_cpu_add(drc_index);

922 pr_err("Invalid action (%d) specified\n", hp_elog->action);

927 unlock_device_hotplug();
931 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
/*
 * sysfs "probe" handler (ppc_md.cpu_probe): parse a drc index from
 * @buf and hot-add that CPU.  Returns @count on success, else the
 * error code.
 */
933 static ssize_t dlpar_cpu_probe(const char *buf, size_t count)

938 rc = kstrtou32(buf, 0, &drc_index);

942 rc = dlpar_cpu_add(drc_index);

944 return rc ? rc : count;
/*
 * sysfs "release" handler (ppc_md.cpu_release): @buf names a cpu
 * device-tree path; look up its drc index and hot-remove it.  Returns
 * @count on success, else the error code.
 */
947 static ssize_t dlpar_cpu_release(const char *buf, size_t count)
949 struct device_node *dn;

953 dn = of_find_node_by_path(buf);

957 rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);

963 rc = dlpar_cpu_remove(dn, drc_index);

966 return rc ? rc : count;
969 #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
/*
 * Device-tree reconfiguration notifier: keep the cpu present map and
 * pacas in sync when cpu nodes are attached to or detached from the
 * device tree.
 */
971 static int pseries_smp_notifier(struct notifier_block *nb,
972 unsigned long action, void *data)
974 struct of_reconfig_data *rd = data;

978 case OF_RECONFIG_ATTACH_NODE:
979 err = pseries_add_processor(rd->dn);
981 case OF_RECONFIG_DETACH_NODE:
982 pseries_remove_processor(rd->dn);

985 return notifier_from_errno(err);
/* Registered on LPAR in pseries_cpu_hotplug_init() */
988 static struct notifier_block pseries_smp_nb = {
989 .notifier_call = pseries_smp_notifier,
/*
 * Boot-time setup: wire up the sysfs probe/release hooks, resolve the
 * RTAS tokens needed for CPU stop/query, install the smp_ops hotplug
 * callbacks, and — on LPAR only — seed the per-node CPU-id maps and
 * register the device-tree reconfiguration notifier.
 */
992 static int __init pseries_cpu_hotplug_init(void)

997 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
998 ppc_md.cpu_probe = dlpar_cpu_probe;
999 ppc_md.cpu_release = dlpar_cpu_release;
1000 #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

1002 rtas_stop_self_token = rtas_token("stop-self");
1003 qcss_tok = rtas_token("query-cpu-stopped-state");

/* Without both RTAS services, CPU hotplug cannot work at all */
1005 if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
1006 qcss_tok == RTAS_UNKNOWN_SERVICE) {
1007 printk(KERN_INFO "CPU Hotplug not supported by firmware "

1012 smp_ops->cpu_offline_self = pseries_cpu_offline_self;
1013 smp_ops->cpu_disable = pseries_cpu_disable;
1014 smp_ops->cpu_die = pseries_cpu_die;

1016 /* Processors can be added/removed only on LPAR */
1017 if (firmware_has_feature(FW_FEATURE_LPAR)) {
1018 for_each_node(node) {
1019 alloc_bootmem_cpumask_var(&node_recorded_ids_map[node]);

1021 /* Record ids of CPU added at boot time */
1022 cpumask_or(node_recorded_ids_map[node],
1023 node_recorded_ids_map[node],
1024 cpumask_of_node(node));

1027 of_reconfig_notifier_register(&pseries_smp_nb);

1032 machine_arch_initcall(pseries, pseries_cpu_hotplug_init);