arch/powerpc/platforms/pseries/hotplug-cpu.c (GNU Linux-libre 6.8.9-gnu)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pseries CPU Hotplug infrastructure.
 *
 * Split out from arch/powerpc/platforms/pseries/setup.c
 *  arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
 *
 * Peter Bergner, IBM   March 2001.
 * Copyright (C) 2001 IBM.
 * Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 * Plus various changes from other IBM teams...
 *
 * Copyright (C) 2006 Michael Ellerman, IBM Corporation
 */

#define pr_fmt(fmt)     "pseries-hotplug-cpu: " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>        /* for idle_task_exit */
#include <linux/sched/hotplug.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/plpar_wrappers.h>
#include <asm/topology.h>

#include "pseries.h"

/* This version can't take the spinlock, because it never returns */
static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;

/*
 * Record the CPU ids used on each node.
 * Protected by cpu_add_remove_lock.
 */
static cpumask_var_t node_recorded_ids_map[MAX_NUMNODES];

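/*
 * Ask the hypervisor to stop the calling thread via the stop-self RTAS
 * call. The unlocked RTAS entry point is used because a successful call
 * never returns, so an ordinary locked call could never release the
 * RTAS lock. Falling through to panic() means the call failed.
 */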
static void rtas_stop_self(void)
{
        static struct rtas_args args;

        local_irq_disable();

        BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);

        rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);

        panic("Alas, I survived.\n");
}

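/*
 * Run on the dying CPU itself: quiesce the interrupt controller (XIVE
 * or XICS, whichever firmware provided), deregister this thread's SLB
 * shadow buffer and VPA from the hypervisor, then stop the thread with
 * rtas_stop_self(). Control should never return past that call.
 */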
static void pseries_cpu_offline_self(void)
{
        unsigned int hwcpu = hard_smp_processor_id();

        local_irq_disable();
        idle_task_exit();
        if (xive_enabled())
                xive_teardown_cpu();
        else
                xics_teardown_cpu();

        unregister_slb_shadow(hwcpu);
        unregister_vpa(hwcpu);
        rtas_stop_self();

        /* Should never get here... */
        BUG();
        for (;;)
                ;
}

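/*
 * Called on the CPU being offlined, with interrupts already disabled by
 * the hotplug core. Marks the CPU offline, repoints boot_cpuid if the
 * boot CPU is the one going away, migrates or disables its interrupts,
 * and drops its MMU context before the CPU parks itself.
 */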
static int pseries_cpu_disable(void)
{
        int cpu = smp_processor_id();

        set_cpu_online(cpu, false);
        vdso_data->processorCount--;

        /* Fix boot_cpuid here */
        if (cpu == boot_cpuid)
                boot_cpuid = cpumask_any(cpu_online_mask);

        /* FIXME: abstract this to not be platform specific later on */
        if (xive_enabled())
                xive_smp_disable_cpu();
        else
                xics_migrate_irqs_away();

        cleanup_cpu_mmu_context();

        return 0;
}

/*
 * pseries_cpu_die: Wait for the cpu to die.
 * @cpu: logical processor id of the CPU whose death we're awaiting.
 *
 * This function is called from the context of the thread which is performing
 * the cpu-offline. Here we wait for long enough to allow the cpu in question
 * to self-destruct so that the cpu-offline thread can send the CPU_DEAD
 * notifications.
 *
 * OTOH, pseries_cpu_offline_self() is called by the @cpu when it wants to
 * self-destruct.
 */
static void pseries_cpu_die(unsigned int cpu)
{
        int cpu_status = 1;
        unsigned int pcpu = get_hard_smp_processor_id(cpu);
        unsigned long timeout = jiffies + msecs_to_jiffies(120000);

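        /*
         * Poll the stopped state of the dying CPU via RTAS. There is no
         * final timeout: the warning below simply re-arms every 120
         * seconds while we keep waiting for the CPU to report stopped.
         */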
        while (true) {
                cpu_status = smp_query_cpu_stopped(pcpu);
                if (cpu_status == QCSS_STOPPED ||
                    cpu_status == QCSS_HARDWARE_ERROR)
                        break;

                if (time_after(jiffies, timeout)) {
                        pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n",
                                cpu, pcpu);
                        timeout = jiffies + msecs_to_jiffies(120000);
                }

                cond_resched();
        }

        if (cpu_status == QCSS_HARDWARE_ERROR) {
                pr_warn("CPU %i (hwid %i) reported error while dying\n",
                        cpu, pcpu);
        }

        paca_ptrs[cpu]->cpu_start = 0;
}

/**
 * find_cpu_id_range - find a linear range of @nthreads free CPU ids.
 * @nthreads : the number of threads (cpu ids)
 * @assigned_node : the node it belongs to or NUMA_NO_NODE if free ids from any
 *                  node can be picked.
 * @cpu_mask: the returned CPU mask.
 *
 * Returns 0 on success.
 */
static int find_cpu_id_range(unsigned int nthreads, int assigned_node,
                             cpumask_var_t *cpu_mask)
{
        cpumask_var_t candidate_mask;
        unsigned int cpu, node;
        int rc = -ENOSPC;

        if (!zalloc_cpumask_var(&candidate_mask, GFP_KERNEL))
                return -ENOMEM;

        cpumask_clear(*cpu_mask);
        for (cpu = 0; cpu < nthreads; cpu++)
                cpumask_set_cpu(cpu, *cpu_mask);

        BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));

        /* Get a bitmap of unoccupied slots. */
        cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);

        if (assigned_node != NUMA_NO_NODE) {
                /*
                 * Remove free ids previously assigned to the other nodes. We
                 * can walk only online nodes because once a node becomes
                 * online it is not turned offline again.
                 */
                for_each_online_node(node) {
                        if (node == assigned_node)
                                continue;
                        cpumask_andnot(candidate_mask, candidate_mask,
                                       node_recorded_ids_map[node]);
                }
        }

        if (cpumask_empty(candidate_mask))
                goto out;

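        /*
         * Slide an nthreads-wide window across the candidate ids until the
         * whole window is free. Because the window always starts at 0 and
         * shifts by nthreads, any range found stays naturally aligned; for
         * example, with nthreads = 8 the candidates are ids 0-7, then 8-15,
         * and so on, which preserves the SMT sibling adjacency assumed
         * elsewhere.
         */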
        while (!cpumask_empty(*cpu_mask)) {
                if (cpumask_subset(*cpu_mask, candidate_mask))
                        /* Found a range where we can insert the new cpu(s) */
                        break;
                cpumask_shift_left(*cpu_mask, *cpu_mask, nthreads);
        }

        if (!cpumask_empty(*cpu_mask))
                rc = 0;

out:
        free_cpumask_var(candidate_mask);
        return rc;
}

/*
 * Update cpu_present_mask and paca(s) for a new cpu node.  The wrinkle
 * here is that a cpu device node may represent multiple logical cpus
 * in the SMT case.  We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
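 * For example, with two-thread SMT the sibling pairs are {0,1}, {2,3},
 * and so on: 2^1 == 3 and 3^1 == 2.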
 */
static int pseries_add_processor(struct device_node *np)
{
        int len, nthreads, node, cpu, assigned_node;
        int rc = 0;
        cpumask_var_t cpu_mask;
        const __be32 *intserv;

        intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
        if (!intserv)
                return 0;

        nthreads = len / sizeof(u32);

        if (!alloc_cpumask_var(&cpu_mask, GFP_KERNEL))
                return -ENOMEM;

        /*
         * Fetch from the DT nodes read by dlpar_configure_connector() the NUMA
         * node id the added CPU belongs to.
         */
        node = of_node_to_nid(np);
        if (node < 0 || !node_possible(node))
                node = first_online_node;

        BUG_ON(node == NUMA_NO_NODE);
        assigned_node = node;

        cpu_maps_update_begin();

        rc = find_cpu_id_range(nthreads, node, &cpu_mask);
        if (rc && nr_node_ids > 1) {
                /*
                 * Try again, considering the free CPU ids from the other
                 * nodes.
                 */
                node = NUMA_NO_NODE;
                rc = find_cpu_id_range(nthreads, NUMA_NO_NODE, &cpu_mask);
        }

        if (rc) {
                pr_err("Cannot add cpu %pOF; this system configuration supports %d logical cpus.\n",
                       np, num_possible_cpus());
                goto out;
        }

        for_each_cpu(cpu, cpu_mask) {
                BUG_ON(cpu_present(cpu));
                set_cpu_present(cpu, true);
                set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
        }

        /* Record the newly used CPU ids for the associated node. */
        cpumask_or(node_recorded_ids_map[assigned_node],
                   node_recorded_ids_map[assigned_node], cpu_mask);

        /*
         * If node was set to NUMA_NO_NODE, the CPU ids have been reused from
         * another node; remove them from that node's mask.
         */
        if (node == NUMA_NO_NODE) {
                cpu = cpumask_first(cpu_mask);
                pr_warn("Reusing free CPU ids %d-%d from another node\n",
                        cpu, cpu + nthreads - 1);
                for_each_online_node(node) {
                        if (node == assigned_node)
                                continue;
                        cpumask_andnot(node_recorded_ids_map[node],
                                       node_recorded_ids_map[node],
                                       cpu_mask);
                }
        }

out:
        cpu_maps_update_done();
        free_cpumask_var(cpu_mask);
        return rc;
}

/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
 */
static void pseries_remove_processor(struct device_node *np)
{
        unsigned int cpu;
        int len, nthreads, i;
        const __be32 *intserv;
        u32 thread;

        intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
        if (!intserv)
                return;

        nthreads = len / sizeof(u32);

        cpu_maps_update_begin();
        for (i = 0; i < nthreads; i++) {
                thread = be32_to_cpu(intserv[i]);
                for_each_present_cpu(cpu) {
                        if (get_hard_smp_processor_id(cpu) != thread)
                                continue;
                        BUG_ON(cpu_online(cpu));
                        set_cpu_present(cpu, false);
                        set_hard_smp_processor_id(cpu, -1);
                        update_numa_cpu_lookup_table(cpu, -1);
                        break;
                }
                if (cpu >= nr_cpu_ids)
                        printk(KERN_WARNING "Could not find cpu to remove with physical id 0x%x\n",
                               thread);
        }
        cpu_maps_update_done();
}

static int dlpar_offline_cpu(struct device_node *dn)
{
        int rc = 0;
        unsigned int cpu;
        int len, nthreads, i;
        const __be32 *intserv;
        u32 thread;

        intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
        if (!intserv)
                return -EINVAL;

        nthreads = len / sizeof(u32);

        cpu_maps_update_begin();
        for (i = 0; i < nthreads; i++) {
                thread = be32_to_cpu(intserv[i]);
                for_each_present_cpu(cpu) {
                        if (get_hard_smp_processor_id(cpu) != thread)
                                continue;

                        if (!cpu_online(cpu))
                                break;

                        /*
                         * device_offline() will return -EBUSY (via cpu_down())
                         * if there is only one CPU left. Check it here to fail
                         * earlier and with a more informative error message,
                         * while also retaining the cpu_add_remove_lock to be
                         * sure that no CPUs are being onlined or offlined
                         * during this check.
                         */
                        if (num_online_cpus() == 1) {
                                pr_warn("Unable to remove last online CPU %pOFn\n", dn);
                                rc = -EBUSY;
                                goto out_unlock;
                        }

                        cpu_maps_update_done();
                        rc = device_offline(get_cpu_device(cpu));
                        if (rc)
                                goto out;
                        cpu_maps_update_begin();
                        break;
                }
                if (cpu == num_possible_cpus()) {
                        pr_warn("Could not find cpu to offline with physical id 0x%x\n",
                                thread);
                }
        }
out_unlock:
        cpu_maps_update_done();

out:
        return rc;
}

static int dlpar_online_cpu(struct device_node *dn)
{
        int rc = 0;
        unsigned int cpu;
        int len, nthreads, i;
        const __be32 *intserv;
        u32 thread;

        intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
        if (!intserv)
                return -EINVAL;

        nthreads = len / sizeof(u32);

        cpu_maps_update_begin();
        for (i = 0; i < nthreads; i++) {
                thread = be32_to_cpu(intserv[i]);
                for_each_present_cpu(cpu) {
                        if (get_hard_smp_processor_id(cpu) != thread)
                                continue;

                        if (!topology_is_primary_thread(cpu)) {
                                if (cpu_smt_control != CPU_SMT_ENABLED)
                                        break;
                                if (!topology_smt_thread_allowed(cpu))
                                        break;
                        }

                        cpu_maps_update_done();
                        find_and_update_cpu_nid(cpu);
                        rc = device_online(get_cpu_device(cpu));
                        if (rc) {
                                dlpar_offline_cpu(dn);
                                goto out;
                        }
                        cpu_maps_update_begin();

                        break;
                }
                if (cpu == num_possible_cpus())
                        printk(KERN_WARNING "Could not find cpu to online with physical id 0x%x\n",
                               thread);
        }
        cpu_maps_update_done();

out:
        return rc;
}

static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index)
{
        struct device_node *child = NULL;
        u32 my_drc_index;
        bool found;
        int rc;

        /* Assume cpu doesn't exist */
        found = false;

        for_each_child_of_node(parent, child) {
                rc = of_property_read_u32(child, "ibm,my-drc-index",
                                          &my_drc_index);
                if (rc)
                        continue;

                if (my_drc_index == drc_index) {
                        of_node_put(child);
                        found = true;
                        break;
                }
        }

        return found;
}

static bool drc_info_valid_index(struct device_node *parent, u32 drc_index)
{
        struct property *info;
        struct of_drc_info drc;
        const __be32 *value;
        u32 index;
        int count, i, j;

        info = of_find_property(parent, "ibm,drc-info", NULL);
        if (!info)
                return false;

        value = of_prop_next_u32(info, NULL, &count);

        /* First value of ibm,drc-info is number of drc-info records */
        if (value)
                value++;
        else
                return false;

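        /*
         * Each drc-info record describes a run of DRC indexes for one
         * connector type: num_sequential_elems indexes starting at
         * drc_index_start and stepping by sequential_inc. The loop below
         * assumes the "CPU" records come first and stops at the first
         * record of another type.
         */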
        for (i = 0; i < count; i++) {
                if (of_read_drc_info_cell(&info, &value, &drc))
                        return false;

                if (strncmp(drc.drc_type, "CPU", 3))
                        break;

                if (drc_index > drc.last_drc_index)
                        continue;

                index = drc.drc_index_start;
                for (j = 0; j < drc.num_sequential_elems; j++) {
                        if (drc_index == index)
                                return true;

                        index += drc.sequential_inc;
                }
        }

        return false;
}

static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
{
        bool found = false;
        int rc, index;

        if (of_property_present(parent, "ibm,drc-info"))
                return drc_info_valid_index(parent, drc_index);

        /* Note that the format of the ibm,drc-indexes array is
         * the number of entries in the array followed by the array
         * of drc values so we start looking at index = 1.
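         *
         * A hypothetical property ibm,drc-indexes = <3 0x1000 0x1001 0x1002>
         * would therefore hold three usable indexes, in cells 1 through 3.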
         */
        index = 1;
        while (!found) {
                u32 drc;

                rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
                                                index++, &drc);

                if (rc)
                        break;

                if (drc == drc_index)
                        found = true;
        }

        return found;
}

static int pseries_cpuhp_attach_nodes(struct device_node *dn)
{
        struct of_changeset cs;
        int ret;

        /*
         * This device node is unattached but may have siblings; open-code the
         * traversal.
         */
        for (of_changeset_init(&cs); dn != NULL; dn = dn->sibling) {
                ret = of_changeset_attach_node(&cs, dn);
                if (ret)
                        goto out;
        }

        ret = of_changeset_apply(&cs);
out:
        of_changeset_destroy(&cs);
        return ret;
}

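/*
 * Hot-add one CPU connector: validate the DRC index against the device
 * tree, acquire the DRC from firmware, build the new device-tree nodes
 * with configure-connector, attach them, then online the new threads.
 * Each step unwinds the previous ones on failure, preserving the first
 * error code.
 */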
static ssize_t dlpar_cpu_add(u32 drc_index)
{
        struct device_node *dn, *parent;
        int rc, saved_rc;

        pr_debug("Attempting to add CPU, drc index: %x\n", drc_index);

        parent = of_find_node_by_path("/cpus");
        if (!parent) {
                pr_warn("Failed to find CPU root node \"/cpus\"\n");
                return -ENODEV;
        }

        if (dlpar_cpu_exists(parent, drc_index)) {
                of_node_put(parent);
                pr_warn("CPU with drc index %x already exists\n", drc_index);
                return -EINVAL;
        }

        if (!valid_cpu_drc_index(parent, drc_index)) {
                of_node_put(parent);
                pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index);
                return -EINVAL;
        }

        rc = dlpar_acquire_drc(drc_index);
        if (rc) {
                pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
                        rc, drc_index);
                of_node_put(parent);
                return -EINVAL;
        }

        dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
        if (!dn) {
                pr_warn("Failed call to configure-connector, drc index: %x\n",
                        drc_index);
                dlpar_release_drc(drc_index);
                of_node_put(parent);
                return -EINVAL;
        }

        rc = pseries_cpuhp_attach_nodes(dn);

        /* Regardless, we are done with parent now */
        of_node_put(parent);

        if (rc) {
                saved_rc = rc;
                pr_warn("Failed to attach node %pOFn, rc: %d, drc index: %x\n",
                        dn, rc, drc_index);

                rc = dlpar_release_drc(drc_index);
                if (!rc)
                        dlpar_free_cc_nodes(dn);

                return saved_rc;
        }

        update_numa_distance(dn);

        rc = dlpar_online_cpu(dn);
        if (rc) {
                saved_rc = rc;
                pr_warn("Failed to online cpu %pOFn, rc: %d, drc index: %x\n",
                        dn, rc, drc_index);

                rc = dlpar_detach_node(dn);
                if (!rc)
                        dlpar_release_drc(drc_index);

                return saved_rc;
        }

        pr_debug("Successfully added CPU %pOFn, drc index: %x\n", dn,
                 drc_index);
        return rc;
}

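/*
 * Count how many nodes still point at @cachedn as their next-level
 * cache: both cpu nodes and other cache nodes are walked, and the
 * reference taken by of_find_next_cache_node() is dropped before the
 * pointer compare since only the address is needed. A shared cache is
 * detached only once its last user goes away.
 */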
static unsigned int pseries_cpuhp_cache_use_count(const struct device_node *cachedn)
{
        unsigned int use_count = 0;
        struct device_node *dn, *tn;

        WARN_ON(!of_node_is_type(cachedn, "cache"));

        for_each_of_cpu_node(dn) {
                tn = of_find_next_cache_node(dn);
                of_node_put(tn);
                if (tn == cachedn)
                        use_count++;
        }

        for_each_node_by_type(dn, "cache") {
                tn = of_find_next_cache_node(dn);
                of_node_put(tn);
                if (tn == cachedn)
                        use_count++;
        }

        return use_count;
}

static int pseries_cpuhp_detach_nodes(struct device_node *cpudn)
{
        struct device_node *dn;
        struct of_changeset cs;
        int ret = 0;

        of_changeset_init(&cs);
        ret = of_changeset_detach_node(&cs, cpudn);
        if (ret)
                goto out;

        dn = cpudn;
        while ((dn = of_find_next_cache_node(dn))) {
                if (pseries_cpuhp_cache_use_count(dn) > 1) {
                        of_node_put(dn);
                        break;
                }

                ret = of_changeset_detach_node(&cs, dn);
                of_node_put(dn);
                if (ret)
                        goto out;
        }

        ret = of_changeset_apply(&cs);
out:
        of_changeset_destroy(&cs);
        return ret;
}

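/*
 * Hot-remove path: offline the threads, hand the DRC back to firmware,
 * then detach the device-tree nodes (including any caches this CPU was
 * the last user of). On failure, the earlier steps are rolled back so
 * the CPU comes back online.
 */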
static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
{
        int rc;

        pr_debug("Attempting to remove CPU %pOFn, drc index: %x\n",
                 dn, drc_index);

        rc = dlpar_offline_cpu(dn);
        if (rc) {
                pr_warn("Failed to offline CPU %pOFn, rc: %d\n", dn, rc);
                return -EINVAL;
        }

        rc = dlpar_release_drc(drc_index);
        if (rc) {
                pr_warn("Failed to release drc (%x) for CPU %pOFn, rc: %d\n",
                        drc_index, dn, rc);
                dlpar_online_cpu(dn);
                return rc;
        }

        rc = pseries_cpuhp_detach_nodes(dn);
        if (rc) {
                int saved_rc = rc;

                pr_warn("Failed to detach CPU %pOFn, rc: %d\n", dn, rc);

                rc = dlpar_acquire_drc(drc_index);
                if (!rc)
                        dlpar_online_cpu(dn);

                return saved_rc;
        }

        pr_debug("Successfully removed CPU, drc index: %x\n", drc_index);
        return 0;
}

static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
{
        struct device_node *dn;
        u32 my_index;
        int rc;

        for_each_node_by_type(dn, "cpu") {
                rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index);
                if (rc)
                        continue;

                if (my_index == drc_index)
                        break;
        }

        return dn;
}

static int dlpar_cpu_remove_by_index(u32 drc_index)
{
        struct device_node *dn;
        int rc;

        dn = cpu_drc_index_to_dn(drc_index);
        if (!dn) {
                pr_warn("Cannot find CPU (drc index %x) to remove\n",
                        drc_index);
                return -ENODEV;
        }

        rc = dlpar_cpu_remove(dn, drc_index);
        of_node_put(dn);
        return rc;
}

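/*
 * Entry point for DLPAR CPU hotplug events delivered in a pseries
 * hotplug error log: dispatches add or remove by DRC index under the
 * device hotplug lock.
 */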
int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
{
        u32 drc_index;
        int rc;

        drc_index = hp_elog->_drc_u.drc_index;

        lock_device_hotplug();

        switch (hp_elog->action) {
        case PSERIES_HP_ELOG_ACTION_REMOVE:
                if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
                        rc = dlpar_cpu_remove_by_index(drc_index);
                        /*
                         * Setting the isolation state of an UNISOLATED/CONFIGURED
                         * device to UNISOLATE is a no-op, but the hypervisor can
                         * use it as a hint that the CPU removal failed.
                         */
                        if (rc)
                                dlpar_unisolate_drc(drc_index);
                } else {
                        rc = -EINVAL;
                }
                break;
        case PSERIES_HP_ELOG_ACTION_ADD:
                if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
                        rc = dlpar_cpu_add(drc_index);
                else
                        rc = -EINVAL;
                break;
        default:
                pr_err("Invalid action (%d) specified\n", hp_elog->action);
                rc = -EINVAL;
                break;
        }

        unlock_device_hotplug();
        return rc;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
        u32 drc_index;
        int rc;

        rc = kstrtou32(buf, 0, &drc_index);
        if (rc)
                return -EINVAL;

        rc = dlpar_cpu_add(drc_index);

        return rc ? rc : count;
}

static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
        struct device_node *dn;
        u32 drc_index;
        int rc;

        dn = of_find_node_by_path(buf);
        if (!dn)
                return -EINVAL;

        rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
        if (rc) {
                of_node_put(dn);
                return -EINVAL;
        }

        rc = dlpar_cpu_remove(dn, drc_index);
        of_node_put(dn);

        return rc ? rc : count;
}

#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

static int pseries_smp_notifier(struct notifier_block *nb,
                                unsigned long action, void *data)
{
        struct of_reconfig_data *rd = data;
        int err = 0;

        switch (action) {
        case OF_RECONFIG_ATTACH_NODE:
                err = pseries_add_processor(rd->dn);
                break;
        case OF_RECONFIG_DETACH_NODE:
                pseries_remove_processor(rd->dn);
                break;
        }
        return notifier_from_errno(err);
}

static struct notifier_block pseries_smp_nb = {
        .notifier_call = pseries_smp_notifier,
};

void __init pseries_cpu_hotplug_init(void)
{
        int qcss_tok;

        rtas_stop_self_token = rtas_function_token(RTAS_FN_STOP_SELF);
        qcss_tok = rtas_function_token(RTAS_FN_QUERY_CPU_STOPPED_STATE);

        if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
                        qcss_tok == RTAS_UNKNOWN_SERVICE) {
                printk(KERN_INFO "CPU Hotplug not supported by firmware - disabling.\n");
                return;
        }

        smp_ops->cpu_offline_self = pseries_cpu_offline_self;
        smp_ops->cpu_disable = pseries_cpu_disable;
        smp_ops->cpu_die = pseries_cpu_die;
}

static int __init pseries_dlpar_init(void)
{
        unsigned int node;

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
        ppc_md.cpu_probe = dlpar_cpu_probe;
        ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

        /* Processors can be added/removed only on LPAR */
        if (firmware_has_feature(FW_FEATURE_LPAR)) {
                for_each_node(node) {
                        if (!alloc_cpumask_var_node(&node_recorded_ids_map[node],
                                                    GFP_KERNEL, node))
                                return -ENOMEM;

                        /* Record ids of CPUs added at boot time */
                        cpumask_copy(node_recorded_ids_map[node],
                                     cpumask_of_node(node));
                }

                of_reconfig_notifier_register(&pseries_smp_nb);
        }

        return 0;
}
machine_arch_initcall(pseries, pseries_dlpar_init);