/*
 * pseries CPU Hotplug infrastructure.
 *
 * Split out from arch/powerpc/platforms/pseries/setup.c
 *  arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
 *
 * Peter Bergner, IBM	March 2001.
 *
 * Copyright (C) 2001 IBM.
 * Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 * Plus various changes from other IBM teams...
 *
 * Copyright (C) 2006 Michael Ellerman, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "pseries-hotplug-cpu: " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>	/* for idle_task_exit */
#include <linux/sched/hotplug.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/plpar_wrappers.h>
#include <asm/topology.h>

#include "pseries.h"
#include "offline_states.h"

/* This version can't take the spinlock, because it never returns */
static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;
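
/*
 * Each possible CPU carries two bits of hotplug state: the state it is
 * currently in (current_state) and the state the offline path should
 * leave it in (preferred_offline_state).  CPU_STATE_OFFLINE means the
 * thread is stopped via RTAS stop-self; CPU_STATE_INACTIVE means it
 * stays ceded in the hypervisor and can be woken again cheaply.
 */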
static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
							CPU_STATE_OFFLINE;
static DEFINE_PER_CPU(enum cpu_state_vals, current_state) = CPU_STATE_OFFLINE;

static enum cpu_state_vals default_offline_state = CPU_STATE_OFFLINE;

static bool cede_offline_enabled __read_mostly = true;

/*
 * Enable/disable cede_offline when available.
 */
static int __init setup_cede_offline(char *str)
{
	return (kstrtobool(str, &cede_offline_enabled) == 0);
}

__setup("cede_offline=", setup_cede_offline);
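
/*
 * Usage example (hypothetical command line): booting with "cede_offline=off"
 * sends offlined CPUs down the stop-self path instead of an extended cede.
 * Any value kstrtobool() understands (0/1, y/n, on/off) is accepted.
 */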

enum cpu_state_vals get_cpu_current_state(int cpu)
{
	return per_cpu(current_state, cpu);
}

void set_cpu_current_state(int cpu, enum cpu_state_vals state)
{
	per_cpu(current_state, cpu) = state;
}

enum cpu_state_vals get_preferred_offline_state(int cpu)
{
	return per_cpu(preferred_offline_state, cpu);
}

void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
{
	per_cpu(preferred_offline_state, cpu) = state;
}

void set_default_offline_state(int cpu)
{
	per_cpu(preferred_offline_state, cpu) = default_offline_state;
}

static void rtas_stop_self(void)
{
	static struct rtas_args args;

	local_irq_disable();

	BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);

	rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);

	panic("Alas, I survived.\n");
}
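
/*
 * Idle/death loop for an offlined CPU.  Depending on the preferred
 * offline state this either cedes the thread back to the hypervisor
 * (looping there until it is asked to come back online or to go fully
 * offline) or stops the thread for good via RTAS stop-self.
 */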
static void pseries_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int hwcpu = hard_smp_processor_id();
	u8 cede_latency_hint = 0;

	local_irq_disable();
	idle_task_exit();
	if (xive_enabled())
		xive_teardown_cpu();
	else
		xics_teardown_cpu();

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
		if (ppc_md.suspend_disable_cpu)
			ppc_md.suspend_disable_cpu();

		cede_latency_hint = 2;

		get_lppaca()->idle = 1;
		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 1;

		while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
			while (!prep_irq_for_idle()) {
				local_irq_enable();
				local_irq_disable();
			}

			extended_cede_processor(cede_latency_hint);
		}

		local_irq_disable();

		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 0;
		get_lppaca()->idle = 0;

		if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
			unregister_slb_shadow(hwcpu);

			hard_irq_disable();
			/*
			 * Call to start_secondary_resume() will not return.
			 * Kernel stack will be reset and start_secondary()
			 * will be called to continue the online operation.
			 */
			start_secondary_resume();
		}
	}

	/* Requested state is CPU_STATE_OFFLINE at this point */
	WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);

	set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
	unregister_slb_shadow(hwcpu);
	rtas_stop_self();

	/* Should never get here... */
	BUG();
	for (;;);
}
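
/*
 * Runs on the CPU being offlined: drop it from the online mask, pick a
 * new boot_cpuid if necessary, and migrate its interrupts away before it
 * heads into pseries_mach_cpu_die().
 */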
static int pseries_cpu_disable(void)
{
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);
	vdso_data->processorCount--;

	/* fix boot_cpuid here */
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);

	/* FIXME: abstract this to not be platform specific later on */
	if (xive_enabled())
		xive_smp_disable_cpu();
	else
		xics_migrate_irqs_away();
	return 0;
}

/*
 * pseries_cpu_die: Wait for the cpu to die.
 * @cpu: logical processor id of the CPU whose death we're awaiting.
 *
 * This function is called from the context of the thread which is performing
 * the cpu-offline. Here we wait for long enough to allow the cpu in question
 * to self-destroy so that the cpu-offline thread can send the CPU_DEAD
 * notifications.
 *
 * OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to
 * self-destruct.
 */
static void pseries_cpu_die(unsigned int cpu)
{
	int tries;
	int cpu_status = 1;
	unsigned int pcpu = get_hard_smp_processor_id(cpu);

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		cpu_status = 1;
		for (tries = 0; tries < 5000; tries++) {
			if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) {
				cpu_status = 0;
				break;
			}
			msleep(1);
		}
	} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {

		for (tries = 0; tries < 25; tries++) {
			cpu_status = smp_query_cpu_stopped(pcpu);
			if (cpu_status == QCSS_STOPPED ||
			    cpu_status == QCSS_HARDWARE_ERROR)
				break;
			cpu_relax();
		}
	}

	if (cpu_status != 0) {
		printk("Querying DEAD? cpu %i (%i) shows %i\n",
		       cpu, pcpu, cpu_status);
	}

	/* Isolation and deallocation are definitely done by
	 * drslot_chrp_cpu.  If they were not they would be
	 * done here.  Change isolate state to Isolate and
	 * change allocation-state to Unusable.
	 */
	paca_ptrs[cpu]->cpu_start = 0;
}

/*
 * Update cpu_present_mask and paca(s) for a new cpu node.  The wrinkle
 * here is that a cpu device node may represent up to two logical cpus
 * in the SMT case.  We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
 */
static int pseries_add_processor(struct device_node *np)
{
	unsigned int cpu;
	cpumask_var_t candidate_mask, tmp;
	int err = -ENOSPC, len, nthreads, i;
	const __be32 *intserv;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return 0;

	zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
	zalloc_cpumask_var(&tmp, GFP_KERNEL);

	nthreads = len / sizeof(u32);
	for (i = 0; i < nthreads; i++)
		cpumask_set_cpu(i, tmp);

	cpu_maps_update_begin();

	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));

	/* Get a bitmap of unoccupied slots. */
	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
	if (cpumask_empty(candidate_mask)) {
		/* If we get here, it most likely means that NR_CPUS is
		 * less than the partition's max processors setting.
		 */
		printk(KERN_ERR "Cannot add cpu %pOF; this system configuration"
		       " supports %d logical cpus.\n", np,
		       num_possible_cpus());
		goto out_unlock;
	}

	while (!cpumask_empty(tmp))
		if (cpumask_subset(tmp, candidate_mask))
			/* Found a range where we can insert the new cpu(s) */
			break;
		else
			cpumask_shift_left(tmp, tmp, nthreads);

	if (cpumask_empty(tmp)) {
		printk(KERN_ERR "Unable to find space in cpu_present_mask for"
		       " processor %s with %d thread(s)\n", np->name,
		       nthreads);
		goto out_unlock;
	}

	for_each_cpu(cpu, tmp) {
		BUG_ON(cpu_present(cpu));
		set_cpu_present(cpu, true);
		set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
	}
	err = 0;
out_unlock:
	cpu_maps_update_done();
	free_cpumask_var(candidate_mask);
	free_cpumask_var(tmp);
	return err;
}

/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
 */
static void pseries_remove_processor(struct device_node *np)
{
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(cpu_online(cpu));
			set_cpu_present(cpu, false);
			set_hard_smp_processor_id(cpu, -1);
			update_numa_cpu_lookup_table(cpu, -1);
			break;
		}
		if (cpu >= nr_cpu_ids)
			printk(KERN_WARNING "Could not find cpu to remove "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();
}

static int dlpar_online_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(get_cpu_current_state(cpu)
					!= CPU_STATE_OFFLINE);
			cpu_maps_update_done();
			timed_topology_update(1);
			find_and_online_cpu_nid(cpu);
			rc = device_online(get_cpu_device(cpu));
			if (rc)
				goto out;
			cpu_maps_update_begin();

			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to online "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;
}

static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index)
{
	struct device_node *child = NULL;
	u32 my_drc_index;
	bool found;
	int rc;

	/* Assume cpu doesn't exist */
	found = false;

	for_each_child_of_node(parent, child) {
		rc = of_property_read_u32(child, "ibm,my-drc-index",
					  &my_drc_index);
		if (rc)
			continue;

		if (my_drc_index == drc_index) {
			of_node_put(child);
			found = true;
			break;
		}
	}

	return found;
}
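
/*
 * Check whether drc_index appears in the parent node's "ibm,drc-indexes"
 * property, i.e. whether it names a CPU slot this partition can hot-add.
 * The first element of that property is an entry count, not a drc index
 * (see the comment in find_dlpar_cpus_to_add() below).
 */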
static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
{
	bool found = false;
	int rc, index;

	/* Skip the leading entry count in ibm,drc-indexes */
	index = 1;
	while (!found) {
		u32 drc;

		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
						index++, &drc);
		if (rc)
			break;

		if (drc == drc_index)
			found = true;
	}

	return found;
}
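
/*
 * Hot-add the CPU at drc_index: acquire the DRC from firmware, build the
 * cpu node with configure-connector, attach it to the device tree, and
 * bring the new threads online.  Each failure path unwinds the steps
 * already taken.
 */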
static ssize_t dlpar_cpu_add(u32 drc_index)
{
	struct device_node *dn, *parent;
	int rc, saved_rc;

	pr_debug("Attempting to add CPU, drc index: %x\n", drc_index);

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Failed to find CPU root node \"/cpus\"\n");
		return -ENODEV;
	}

	if (dlpar_cpu_exists(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("CPU with drc index %x already exists\n", drc_index);
		return -EINVAL;
	}

	if (!valid_cpu_drc_index(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index);
		return -EINVAL;
	}

	rc = dlpar_acquire_drc(drc_index);
	if (rc) {
		pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
			rc, drc_index);
		of_node_put(parent);
		return -EINVAL;
	}

	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
	if (!dn) {
		pr_warn("Failed call to configure-connector, drc index: %x\n",
			drc_index);
		dlpar_release_drc(drc_index);
		of_node_put(parent);
		return -EINVAL;
	}

	rc = dlpar_attach_node(dn, parent);

	/* Regardless we are done with parent now */
	of_node_put(parent);

	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n",
			dn->name, rc, drc_index);

		rc = dlpar_release_drc(drc_index);
		if (!rc)
			dlpar_free_cc_nodes(dn);

		return saved_rc;
	}

	rc = dlpar_online_cpu(dn);
	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to online cpu %s, rc: %d, drc index: %x\n",
			dn->name, rc, drc_index);

		rc = dlpar_detach_node(dn);
		if (!rc)
			dlpar_release_drc(drc_index);

		return saved_rc;
	}

	pr_debug("Successfully added CPU %s, drc index: %x\n", dn->name,
		 drc_index);
	return rc;
}
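
/*
 * Take every thread of the given cpu node offline.  Threads that are
 * only ceded (CPU_STATE_INACTIVE) are prodded awake with H_PROD so they
 * run the full stop-self path and end up in CPU_STATE_OFFLINE.
 */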
static int dlpar_offline_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;

			if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
				break;

			if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
				set_preferred_offline_state(cpu,
							    CPU_STATE_OFFLINE);
				cpu_maps_update_done();
				timed_topology_update(1);
				rc = device_offline(get_cpu_device(cpu));
				if (rc)
					goto out;
				cpu_maps_update_begin();
				break;
			}

			/*
			 * The cpu is in CPU_STATE_INACTIVE.
			 * Upgrade its state to CPU_STATE_OFFLINE.
			 */
			set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
			BUG_ON(plpar_hcall_norets(H_PROD, thread)
								!= H_SUCCESS);
			__cpu_die(cpu);
			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to offline with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;
}
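
/*
 * Hot-remove the given cpu node: offline all of its threads, release the
 * DRC back to firmware, and detach the node from the device tree.  On
 * failure the earlier steps are undone so the CPU remains usable.
 */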
static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
{
	int rc;

	pr_debug("Attempting to remove CPU %s, drc index: %x\n",
		 dn->name, drc_index);

	rc = dlpar_offline_cpu(dn);
	if (rc) {
		pr_warn("Failed to offline CPU %s, rc: %d\n", dn->name, rc);
		return -EINVAL;
	}

	rc = dlpar_release_drc(drc_index);
	if (rc) {
		pr_warn("Failed to release drc (%x) for CPU %s, rc: %d\n",
			drc_index, dn->name, rc);
		dlpar_online_cpu(dn);
		return rc;
	}

	rc = dlpar_detach_node(dn);
	if (rc) {
		int saved_rc = rc;

		pr_warn("Failed to detach CPU %s, rc: %d", dn->name, rc);

		rc = dlpar_acquire_drc(drc_index);
		if (!rc)
			dlpar_online_cpu(dn);

		return saved_rc;
	}

	pr_debug("Successfully removed CPU, drc index: %x\n", drc_index);
	return 0;
}

static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
{
	struct device_node *dn;
	u32 my_index;
	int rc;

	for_each_node_by_type(dn, "cpu") {
		rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index);
		if (rc)
			continue;

		if (my_index == drc_index)
			break;
	}

	return dn;
}

static int dlpar_cpu_remove_by_index(u32 drc_index)
{
	struct device_node *dn;
	int rc;

	dn = cpu_drc_index_to_dn(drc_index);
	if (!dn) {
		pr_warn("Cannot find CPU (drc index %x) to remove\n",
			drc_index);
		return -ENODEV;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);
	return rc;
}

static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove)
{
	struct device_node *dn;
	int cpus_found = 0;
	int rc;

	/* We want to find cpus_to_remove + 1 CPUs to ensure we do not
	 * remove the last CPU.
	 */
	for_each_node_by_type(dn, "cpu") {
		cpus_found++;

		if (cpus_found > cpus_to_remove) {
			of_node_put(dn);
			break;
		}

		/* Note that cpus_found is always 1 ahead of the index
		 * into the cpu_drcs array, so we use cpus_found - 1
		 */
		rc = of_property_read_u32(dn, "ibm,my-drc-index",
					  &cpu_drcs[cpus_found - 1]);
		if (rc) {
			pr_warn("Error occurred getting drc-index for %s\n",
				dn->name);
			of_node_put(dn);
			return -1;
		}
	}

	if (cpus_found < cpus_to_remove) {
		pr_warn("Failed to find enough CPUs (%d of %d) to remove\n",
			cpus_found, cpus_to_remove);
	} else if (cpus_found == cpus_to_remove) {
		pr_warn("Cannot remove all CPUs\n");
	}

	return cpus_found;
}

static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
{
	u32 *cpu_drcs;
	int cpus_found;
	int cpus_removed = 0;
	int i, rc;

	pr_debug("Attempting to hot-remove %d CPUs\n", cpus_to_remove);

	cpu_drcs = kcalloc(cpus_to_remove, sizeof(*cpu_drcs), GFP_KERNEL);
	if (!cpu_drcs)
		return -EINVAL;

	cpus_found = find_dlpar_cpus_to_remove(cpu_drcs, cpus_to_remove);
	if (cpus_found <= cpus_to_remove) {
		kfree(cpu_drcs);
		return -EINVAL;
	}

	for (i = 0; i < cpus_to_remove; i++) {
		rc = dlpar_cpu_remove_by_index(cpu_drcs[i]);
		if (rc)
			break;

		cpus_removed++;
	}

	if (cpus_removed != cpus_to_remove) {
		pr_warn("CPU hot-remove failed, adding back removed CPUs\n");

		for (i = 0; i < cpus_removed; i++)
			dlpar_cpu_add(cpu_drcs[i]);

		rc = -EINVAL;
	} else {
		rc = 0;
	}

	kfree(cpu_drcs);
	return rc;
}

static int find_dlpar_cpus_to_add(u32 *cpu_drcs, u32 cpus_to_add)
{
	struct device_node *parent;
	int cpus_found = 0;
	int index, rc;

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Could not find CPU root node in device tree\n");
		return -1;
	}

	/* Search the ibm,drc-indexes array for possible CPU drcs to
	 * add. Note that the format of the ibm,drc-indexes array is
	 * the number of entries in the array followed by the array
	 * of drc values so we start looking at index = 1.
	 */
	index = 1;
	while (cpus_found < cpus_to_add) {
		u32 drc;

		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
						index++, &drc);
		if (rc)
			break;

		if (dlpar_cpu_exists(parent, drc))
			continue;

		cpu_drcs[cpus_found++] = drc;
	}

	of_node_put(parent);
	return cpus_found;
}

static int dlpar_cpu_add_by_count(u32 cpus_to_add)
{
	u32 *cpu_drcs;
	int cpus_added = 0;
	int cpus_found;
	int i, rc;

	pr_debug("Attempting to hot-add %d CPUs\n", cpus_to_add);

	cpu_drcs = kcalloc(cpus_to_add, sizeof(*cpu_drcs), GFP_KERNEL);
	if (!cpu_drcs)
		return -EINVAL;

	cpus_found = find_dlpar_cpus_to_add(cpu_drcs, cpus_to_add);
	if (cpus_found < cpus_to_add) {
		pr_warn("Failed to find enough CPUs (%d of %d) to add\n",
			cpus_found, cpus_to_add);
		rc = -EINVAL;
		goto out;
	}

	for (i = 0; i < cpus_to_add; i++) {
		rc = dlpar_cpu_add(cpu_drcs[i]);
		if (rc)
			break;

		cpus_added++;
	}

	if (cpus_added < cpus_to_add) {
		pr_warn("CPU hot-add failed, removing any added CPUs\n");

		for (i = 0; i < cpus_added; i++)
			dlpar_cpu_remove_by_index(cpu_drcs[i]);

		rc = -EINVAL;
	} else {
		rc = 0;
	}

out:
	kfree(cpu_drcs);
	return rc;
}
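
/*
 * Remove and immediately re-add the CPU with the given logical id, keyed
 * by the drc-index of its device node.  The likely intent (not spelled
 * out in this file) is to re-online the CPU with freshly read device
 * tree properties such as its NUMA affinity.
 */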
int dlpar_cpu_readd(int cpu)
{
	struct device_node *dn;
	struct device *dev;
	u32 drc_index;
	int rc;

	dev = get_cpu_device(cpu);
	dn = dev->of_node;

	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);

	rc = dlpar_cpu_remove_by_index(drc_index);
	if (!rc)
		rc = dlpar_cpu_add(drc_index);

	return rc;
}
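
/*
 * Entry point for CPU DLPAR requests delivered as hotplug error logs:
 * dispatch add/remove either by an explicit drc-index or by a count of
 * CPUs, under the device hotplug lock.
 */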
int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	count = hp_elog->_drc_u.drc_count;
	drc_index = hp_elog->_drc_u.drc_index;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_cpu_remove_by_count(count);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_remove_by_index(drc_index);
		else
			rc = -EINVAL;
		break;
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_cpu_add_by_count(count);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_add(drc_index);
		else
			rc = -EINVAL;
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	unlock_device_hotplug();
	return rc;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
	u32 drc_index;
	int rc;

	rc = kstrtou32(buf, 0, &drc_index);
	if (rc)
		return -EINVAL;

	rc = dlpar_cpu_add(drc_index);

	return rc ? rc : count;
}

static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
	struct device_node *dn;
	u32 drc_index;
	int rc;

	dn = of_find_node_by_path(buf);
	if (!dn)
		return -EINVAL;

	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);

	return rc ? rc : count;
}

#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
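
/*
 * Device tree reconfiguration notifier: keep cpu_present_mask and the
 * paca hard ids in sync as cpu nodes are attached to or detached from
 * the tree.
 */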
static int pseries_smp_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_processor(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		pseries_remove_processor(rd->dn);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_smp_nb = {
	.notifier_call = pseries_smp_notifier,
};
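
/*
 * The cede latency settings are read from firmware via the
 * ibm,get-system-parameter RTAS call using parameter token 45.  A
 * successful read is one of the conditions for making CPU_STATE_INACTIVE
 * the default offline state, see pseries_cpu_hotplug_init() below.
 */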
#define MAX_CEDE_LATENCY_LEVELS		4
#define CEDE_LATENCY_PARAM_LENGTH	10
#define CEDE_LATENCY_PARAM_MAX_LENGTH	\
	(MAX_CEDE_LATENCY_LEVELS * CEDE_LATENCY_PARAM_LENGTH * sizeof(char))
#define CEDE_LATENCY_TOKEN		45

static char cede_parameters[CEDE_LATENCY_PARAM_MAX_LENGTH];

static int parse_cede_parameters(void)
{
	memset(cede_parameters, 0, CEDE_LATENCY_PARAM_MAX_LENGTH);
	return rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
			 NULL,
			 CEDE_LATENCY_TOKEN,
			 __pa(cede_parameters),
			 CEDE_LATENCY_PARAM_MAX_LENGTH);
}

static int __init pseries_cpu_hotplug_init(void)
{
	int cpu;
	int qcss_tok;

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	ppc_md.cpu_probe = dlpar_cpu_probe;
	ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

	rtas_stop_self_token = rtas_token("stop-self");
	qcss_tok = rtas_token("query-cpu-stopped-state");

	if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
	    qcss_tok == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_INFO "CPU Hotplug not supported by firmware "
		       "- disabling.\n");
		return 0;
	}

	ppc_md.cpu_die = pseries_mach_cpu_die;
	smp_ops->cpu_disable = pseries_cpu_disable;
	smp_ops->cpu_die = pseries_cpu_die;

	/* Processors can be added/removed only on LPAR */
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		of_reconfig_notifier_register(&pseries_smp_nb);
		cpu_maps_update_begin();
		if (cede_offline_enabled && parse_cede_parameters() == 0) {
			default_offline_state = CPU_STATE_INACTIVE;
			for_each_online_cpu(cpu)
				set_default_offline_state(cpu);
		}
		cpu_maps_update_done();
	}

	return 0;
}
machine_arch_initcall(pseries, pseries_cpu_hotplug_init);