2 * pseries Memory Hotplug infrastructure.
4 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
12 #define pr_fmt(fmt) "pseries-hotplug-mem: " fmt
15 #include <linux/of_address.h>
16 #include <linux/memblock.h>
17 #include <linux/memory.h>
18 #include <linux/memory_hotplug.h>
19 #include <linux/slab.h>
21 #include <asm/firmware.h>
22 #include <asm/machdep.h>
24 #include <asm/sparsemem.h>
25 #include <asm/fadump.h>
/*
 * Set around DLPAR-initiated of_update_property() calls on the drconf
 * memory property (see dlpar_update_drconf_property() below); presumably
 * lets the OF reconfig notifier distinguish self-generated updates from
 * external ones — TODO confirm, the consumer is not visible in this extract.
 */
28 static bool rtas_hp_event;
/*
 * pseries_memory_block_size - size in bytes of one hotpluggable memory
 * block (LMB) on this partition.
 *
 * Preferred source: the "ibm,lmb-size" property of the
 * /ibm,dynamic-reconfiguration-memory node. Fallback (pseries machines
 * without that node): probe /memory@0 to learn the first region's size,
 * then read the size of the next /memory@<addr> node. Defaults to
 * MIN_MEMORY_BLOCK_SIZE if neither path succeeds.
 *
 * NOTE(review): this extract is missing lines (braces, declarations of
 * 'size', 'r', 'buf', NULL checks, the final return); comments below
 * describe only the visible logic.
 */
30 unsigned long pseries_memory_block_size(void)
32 	struct device_node *np;
33 	u64 memblock_size = MIN_MEMORY_BLOCK_SIZE;
36 	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
40 		size = of_get_property(np, "ibm,lmb-size", NULL);
	/* Property is big-endian in the device tree. */
42 			memblock_size = be64_to_cpup(size);
44 	} else if (machine_is(pseries)) {
45 		/* This fallback really only applies to pseries */
46 		unsigned int memzero_size = 0;
48 		np = of_find_node_by_path("/memory@0");
50 			if (!of_address_to_resource(np, 0, &r))
51 				memzero_size = resource_size(&r);
56 			/* We now know the size of memory@0, use this to find
57 			 * the first memoryblock and get its size.
			 */
	/* The next memory node is named by the end address of memory@0. */
61 			sprintf(buf, "/memory@%x", memzero_size);
62 			np = of_find_node_by_path(buf);
64 				if (!of_address_to_resource(np, 0, &r))
65 					memblock_size = resource_size(&r);
/*
 * dlpar_free_property - release a property allocated by
 * dlpar_clone_property(). Body not visible in this extract; presumably
 * frees the name, the value buffer, and the struct itself — TODO confirm
 * against the full source.
 */
73 static void dlpar_free_property(struct property *prop)
/*
 * dlpar_clone_property - make a dynamically-allocated copy of @prop.
 *
 * The new value buffer is @prop_size bytes (may exceed prop->length so a
 * caller can append data, see find_aa_index()); only prop->length bytes
 * are copied from the original. The clone is marked OF_DYNAMIC so the OF
 * core knows it may be freed. Returns NULL on allocation failure
 * (error-return lines not visible in this extract).
 */
80 static struct property *dlpar_clone_property(struct property *prop,
83 	struct property *new_prop;
85 	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
89 	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
90 	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	/* Either allocation failing frees the partial clone. */
91 	if (!new_prop->name || !new_prop->value) {
92 		dlpar_free_property(new_prop);
96 	memcpy(new_prop->value, prop->value, prop->length);
97 	new_prop->length = prop_size;
99 	of_property_set_flag(new_prop, OF_DYNAMIC);
/*
 * dlpar_clone_drconf_property - clone the "ibm,dynamic-memory" property
 * of @dn and byte-swap it into CPU endianness.
 *
 * The property layout is a 32-bit LMB count followed by an array of
 * struct of_drconf_cell entries; each entry's base_addr, drc_index,
 * aa_index and flags are converted so callers can work with native
 * values. The swap is undone by dlpar_update_drconf_property() before
 * the property is written back.
 *
 * NOTE(review): the lines reading the count into num_lmbs and advancing
 * 'p' are missing from this extract.
 */
103 static struct property *dlpar_clone_drconf_property(struct device_node *dn)
105 	struct property *prop, *new_prop;
106 	struct of_drconf_cell *lmbs;
110 	prop = of_find_property(dn, "ibm,dynamic-memory", NULL);
114 	new_prop = dlpar_clone_property(prop, prop->length);
118 	/* Convert the property to cpu endian-ness */
	/* First cell: number of LMB entries that follow. */
120 	*p = be32_to_cpu(*p);
123 	lmbs = (struct of_drconf_cell *)p;
125 	for (i = 0; i < num_lmbs; i++) {
126 		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
127 		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
128 		lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
129 		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
/*
 * dlpar_update_drconf_property - convert a CPU-endian drconf property
 * (from dlpar_clone_drconf_property()) back to big-endian and install it
 * on @dn via of_update_property().
 *
 * rtas_hp_event is set for the duration of the update; presumably so the
 * reconfig notifier can ignore this self-generated property change —
 * TODO confirm, the notifier check is not visible here.
 */
135 static void dlpar_update_drconf_property(struct device_node *dn,
136 					 struct property *prop)
138 	struct of_drconf_cell *lmbs;
142 	/* Convert the property back to BE */
145 	*p = cpu_to_be32(*p);
148 	lmbs = (struct of_drconf_cell *)p;
149 	for (i = 0; i < num_lmbs; i++) {
150 		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
151 		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
152 		lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
153 		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
156 	rtas_hp_event = true;
157 	of_update_property(dn, prop);
158 	rtas_hp_event = false;
/*
 * dlpar_update_device_tree_lmb - propagate @lmb's flags and aa_index
 * into the "ibm,dynamic-memory" device tree property.
 *
 * Clones the property (CPU-endian), finds the entry with a matching
 * drc_index, copies the new flags/aa_index into it, and writes the
 * property back via dlpar_update_drconf_property(). Error paths and the
 * of_node_put() for 'dn' are not visible in this extract.
 */
161 static int dlpar_update_device_tree_lmb(struct of_drconf_cell *lmb)
163 	struct device_node *dn;
164 	struct property *prop;
165 	struct of_drconf_cell *lmbs;
169 	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
173 	prop = dlpar_clone_drconf_property(dn);
181 	lmbs = (struct of_drconf_cell *)p;
183 	for (i = 0; i < num_lmbs; i++) {
	/* drc_index uniquely identifies the LMB to update. */
184 		if (lmbs[i].drc_index == lmb->drc_index) {
185 			lmbs[i].flags = lmb->flags;
186 			lmbs[i].aa_index = lmb->aa_index;
188 			dlpar_update_drconf_property(dn, prop);
/*
 * find_aa_index - find (or create) the associativity-lookup-arrays index
 * matching @lmb_assoc.
 *
 * Scans each existing associativity array for a match against the LMB's
 * associativity (skipping lmb_assoc[0], which is the element count). If
 * none matches, the property is grown by one array: the clone gets
 * aa_array_sz extra bytes, the array count is bumped, the new
 * associativity is appended, and the property is updated on @dr_node.
 * The returned index is then count - 1 (the appended entry).
 *
 * NOTE(review): the match-found assignment to aa_index and the early
 * declarations/returns are missing from this extract.
 */
197 static u32 find_aa_index(struct device_node *dr_node,
198 			 struct property *ala_prop, const u32 *lmb_assoc)
202 	int aa_arrays, aa_array_entries, aa_array_sz;
206 	 * The ibm,associativity-lookup-arrays property is defined to be
207 	 * a 32-bit value specifying the number of associativity arrays
208 	 * followed by a 32-bitvalue specifying the number of entries per
209 	 * array, followed by the associativity arrays.
	 */
211 	assoc_arrays = ala_prop->value;
213 	aa_arrays = be32_to_cpu(assoc_arrays[0]);
214 	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
215 	aa_array_sz = aa_array_entries * sizeof(u32);
218 	for (i = 0; i < aa_arrays; i++) {
	/* +2 skips the two header words (count and entries-per-array). */
219 		index = (i * aa_array_entries) + 2;
221 		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
	/* No existing array matched: append a new one. */
228 	if (aa_index == -1) {
229 		struct property *new_prop;
232 		new_prop_size = ala_prop->length + aa_array_sz;
233 		new_prop = dlpar_clone_property(ala_prop, new_prop_size);
237 		assoc_arrays = new_prop->value;
239 		/* increment the number of entries in the lookup array */
240 		assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);
242 		/* copy the new associativity into the lookup array */
243 		index = aa_arrays * aa_array_entries + 2;
244 		memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);
246 		of_update_property(dr_node, new_prop);
249 		 * The associativity lookup array index for this lmb is
250 		 * number of entries - 1 since we added its associativity
251 		 * to the end of the lookup array.
		 */
253 		aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
/*
 * lookup_lmb_associativity_index - resolve the aa_index for @lmb.
 *
 * Uses dlpar_configure_connector() on the LMB's drc index to obtain a
 * device node carrying "ibm,associativity", then matches that against
 * the "ibm,associativity-lookup-arrays" property of the drconf node via
 * find_aa_index(). The configure-connector nodes are freed and dr_node
 * dropped on every visible path. Error returns between the paired
 * cleanup calls are missing from this extract.
 */
259 static u32 lookup_lmb_associativity_index(struct of_drconf_cell *lmb)
261 	struct device_node *parent, *lmb_node, *dr_node;
262 	struct property *ala_prop;
263 	const u32 *lmb_assoc;
266 	parent = of_find_node_by_path("/");
	/* drc_index is CPU-endian here; the connector call wants BE. */
270 	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
276 	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
278 		dlpar_free_cc_nodes(lmb_node);
282 	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
284 		dlpar_free_cc_nodes(lmb_node);
288 	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
291 		of_node_put(dr_node);
292 		dlpar_free_cc_nodes(lmb_node);
296 	aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc);
298 	of_node_put(dr_node);
299 	dlpar_free_cc_nodes(lmb_node);
/*
 * dlpar_add_device_tree_lmb - mark @lmb assigned in the device tree.
 *
 * Sets DRCONF_MEM_ASSIGNED, resolves the LMB's associativity lookup
 * index (logging an error if that fails — the error-return line is not
 * visible here), records it in lmb->aa_index, and pushes the change into
 * the device tree via dlpar_update_device_tree_lmb().
 */
303 static int dlpar_add_device_tree_lmb(struct of_drconf_cell *lmb)
307 	lmb->flags |= DRCONF_MEM_ASSIGNED;
309 	aa_index = lookup_lmb_associativity_index(lmb);
311 		pr_err("Couldn't find associativity index for drc index %x\n",
316 	lmb->aa_index = aa_index;
317 	return dlpar_update_device_tree_lmb(lmb);
/*
 * dlpar_remove_device_tree_lmb - mark @lmb unassigned in the device tree.
 *
 * Clears DRCONF_MEM_ASSIGNED, invalidates aa_index (0xffffffff), and
 * writes the change back via dlpar_update_device_tree_lmb().
 */
320 static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb)
322 	lmb->flags &= ~DRCONF_MEM_ASSIGNED;
323 	lmb->aa_index = 0xffffffff;
324 	return dlpar_update_device_tree_lmb(lmb);
/*
 * lmb_to_memblock - map an LMB to its struct memory_block.
 *
 * Converts the LMB base address to a sparsemem section number and looks
 * up the corresponding memory_block device. find_memory_block() takes a
 * device reference; callers drop it with put_device() (see
 * dlpar_change_lmb_state()). Return statement not visible in this
 * extract.
 */
327 static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
329 	unsigned long section_nr;
330 	struct mem_section *mem_sect;
331 	struct memory_block *mem_block;
333 	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
334 	mem_sect = __nr_to_section(section_nr);
336 	mem_block = find_memory_block(mem_sect);
/*
 * dlpar_change_lmb_state - online or offline the memory block backing @lmb.
 *
 * @online: true to online the block, false to offline it.
 *
 * Only acts if the device's current offline state differs from the
 * requested one; otherwise the state is already correct. Drops the
 * reference taken by lmb_to_memblock(). NULL-check and return lines are
 * missing from this extract.
 */
340 static int dlpar_change_lmb_state(struct of_drconf_cell *lmb, bool online)
342 	struct memory_block *mem_block;
345 	mem_block = lmb_to_memblock(lmb);
349 	if (online && mem_block->dev.offline)
350 		rc = device_online(&mem_block->dev);
351 	else if (!online && !mem_block->dev.offline)
352 		rc = device_offline(&mem_block->dev);
356 	put_device(&mem_block->dev);
/* dlpar_online_lmb - convenience wrapper: online the LMB's memory block. */
361 static int dlpar_online_lmb(struct of_drconf_cell *lmb)
363 	return dlpar_change_lmb_state(lmb, true);
366 #ifdef CONFIG_MEMORY_HOTREMOVE
/* dlpar_offline_lmb - convenience wrapper: offline the LMB's memory block. */
367 static int dlpar_offline_lmb(struct of_drconf_cell *lmb)
369 	return dlpar_change_lmb_state(lmb, false);
/*
 * pseries_remove_memblock - remove @memblock_size bytes of memory at @base.
 *
 * Holds the device hotplug lock while removing the range one
 * MIN_MEMORY_BLOCK_SIZE section at a time via remove_memory(), then
 * erases the region from the memblock allocator. Bails out early if the
 * starting pfn is invalid (the goto target is not visible here).
 *
 * NOTE(review): 'base' is advanced inside the loop, yet memblock_remove()
 * at the end uses 'base' with the full memblock_size — in the upstream
 * source the final removal uses the saved original base; lines appear to
 * be missing from this extract. Verify against the full file.
 */
372 static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
374 	unsigned long block_sz, start_pfn;
375 	int sections_per_block;
378 	start_pfn = base >> PAGE_SHIFT;
380 	lock_device_hotplug();
382 	if (!pfn_valid(start_pfn))
385 	block_sz = pseries_memory_block_size();
386 	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
387 	nid = memory_add_physaddr_to_nid(base);
389 	for (i = 0; i < sections_per_block; i++) {
390 		remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
391 		base += MIN_MEMORY_BLOCK_SIZE;
395 	/* Update memory regions for memory remove */
396 	memblock_remove(base, memblock_size);
397 	unlock_device_hotplug();
/*
 * pseries_remove_mem_node - handle detach of a /memory device tree node.
 *
 * Ignores nodes whose device_type is not "memory". Reads the base
 * address (first 64-bit cell of "reg") and the LMB size (regs[3]) and
 * removes that memory range. Declarations of 'regs'/'base' and the
 * return statements are not visible in this extract.
 */
401 static int pseries_remove_mem_node(struct device_node *np)
406 	unsigned int lmb_size;
410 	 * Check to see if we are actually removing memory
	 */
412 	type = of_get_property(np, "device_type", NULL);
413 	if (type == NULL || strcmp(type, "memory") != 0)
417 	 * Find the base address and size of the memblock
	 */
419 	regs = of_get_property(np, "reg", NULL);
423 	base = be64_to_cpu(*(unsigned long *)regs);
424 	lmb_size = be32_to_cpu(regs[3]);
426 	pseries_remove_memblock(base, lmb_size);
/*
 * lmb_is_removable - can the memory backing @lmb be hot-removed?
 *
 * False immediately if the LMB is not assigned. Otherwise walks every
 * sparsemem section in the block: sections that are not present are
 * skipped; for present ones the removability result is AND-ed into 'rc'
 * via is_mem_section_removable(). With CONFIG_FA_DUMP, memory inside the
 * fadump boot/preserved area is never removable.
 */
430 static bool lmb_is_removable(struct of_drconf_cell *lmb)
432 	int i, scns_per_block;
434 	unsigned long pfn, block_sz;
437 	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
440 	block_sz = memory_block_size_bytes();
441 	scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
442 	phys_addr = lmb->base_addr;
444 #ifdef CONFIG_FA_DUMP
446 	 * Don't hot-remove memory that falls in fadump boot memory area
447 	 * and memory that is reserved for capturing old kernel memory.
	 */
449 	if (is_fadump_memory_area(phys_addr, block_sz))
453 	for (i = 0; i < scns_per_block; i++) {
454 		pfn = PFN_DOWN(phys_addr);
	/* Skip holes: absent sections don't block removal. */
455 		if (!pfn_present(pfn)) {
456 			phys_addr += MIN_MEMORY_BLOCK_SIZE;
	/* All visited sections must be removable for the LMB to be. */
460 		rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
461 		phys_addr += MIN_MEMORY_BLOCK_SIZE;
464 	return rc ? true : false;
/* Forward declaration: remove paths re-add LMBs on partial failure. */
467 static int dlpar_add_lmb(struct of_drconf_cell *);
/*
 * dlpar_remove_lmb - hot-remove a single LMB.
 *
 * Sequence: verify removability, offline the memory block, remove the
 * memory from the kernel (remove_memory), erase it from the memblock
 * allocator, and finally mark the LMB unassigned in the device tree.
 * Early error returns between steps are missing from this extract.
 */
469 static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
471 	unsigned long block_sz;
474 	if (!lmb_is_removable(lmb))
477 	rc = dlpar_offline_lmb(lmb);
481 	block_sz = pseries_memory_block_size();
482 	nid = memory_add_physaddr_to_nid(lmb->base_addr);
484 	remove_memory(nid, lmb->base_addr, block_sz);
486 	/* Update memory regions for memory remove */
487 	memblock_remove(lmb->base_addr, block_sz);
489 	dlpar_remove_device_tree_lmb(lmb);
/*
 * dlpar_memory_remove_by_count - hot-remove @lmbs_to_remove LMBs,
 * choosing any removable ones from the drconf list in @prop.
 *
 * Phase 1: count removable LMBs; fail up front if fewer than requested.
 * Phase 2: remove LMBs one by one, marking each success with the
 * 'reserved' scratch bit. Phase 3a (partial failure): add every reserved
 * LMB back. Phase 3b (success): release each reserved LMB's DRC and log
 * it, clearing the scratch bit either way.
 */
493 static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
494 					struct property *prop)
496 	struct of_drconf_cell *lmbs;
497 	int lmbs_removed = 0;
498 	int lmbs_available = 0;
502 	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);
504 	if (lmbs_to_remove == 0)
509 	lmbs = (struct of_drconf_cell *)p;
511 	/* Validate that there are enough LMBs to satisfy the request */
512 	for (i = 0; i < num_lmbs; i++) {
513 		if (lmb_is_removable(&lmbs[i]))
517 	if (lmbs_available < lmbs_to_remove) {
518 		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
519 			lmbs_available, lmbs_to_remove);
523 	for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) {
524 		rc = dlpar_remove_lmb(&lmbs[i]);
530 		/* Mark this lmb so we can add it later if all of the
531 		 * requested LMBs cannot be removed.
		 */
533 		lmbs[i].reserved = 1;
536 	if (lmbs_removed != lmbs_to_remove) {
537 		pr_err("Memory hot-remove failed, adding LMB's back\n");
	/* Roll back: re-add everything we managed to remove. */
539 		for (i = 0; i < num_lmbs; i++) {
540 			if (!lmbs[i].reserved)
543 			rc = dlpar_add_lmb(&lmbs[i]);
545 				pr_err("Failed to add LMB back, drc index %x\n",
548 			lmbs[i].reserved = 0;
	/* Commit: release the DRCs of the removed LMBs. */
553 		for (i = 0; i < num_lmbs; i++) {
554 			if (!lmbs[i].reserved)
557 			dlpar_release_drc(lmbs[i].drc_index);
558 			pr_info("Memory at %llx was hot-removed\n",
561 			lmbs[i].reserved = 0;
/*
 * dlpar_memory_remove_by_index - hot-remove the one LMB whose drc index
 * matches @drc_index, releasing its DRC on success. Logs success or
 * failure with the LMB's base address. The not-found path and return
 * are missing from this extract.
 */
569 static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
571 	struct of_drconf_cell *lmbs;
576 	pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);
580 	lmbs = (struct of_drconf_cell *)p;
583 	for (i = 0; i < num_lmbs; i++) {
584 		if (lmbs[i].drc_index == drc_index) {
586 			rc = dlpar_remove_lmb(&lmbs[i]);
588 				dlpar_release_drc(lmbs[i].drc_index);
598 		pr_info("Failed to hot-remove memory at %llx\n",
601 		pr_info("Memory at %llx was hot-removed\n", lmbs[i].base_addr);
/*
 * dlpar_memory_readd_by_index - remove then re-add the LMB with
 * @drc_index (used to refresh an LMB, e.g. after an affinity change).
 * If the re-add fails the DRC is released. Logs the outcome with the
 * LMB's base address.
 */
606 static int dlpar_memory_readd_by_index(u32 drc_index, struct property *prop)
608 	struct of_drconf_cell *lmbs;
613 	pr_info("Attempting to update LMB, drc index %x\n", drc_index);
617 	lmbs = (struct of_drconf_cell *)p;
620 	for (i = 0; i < num_lmbs; i++) {
621 		if (lmbs[i].drc_index == drc_index) {
623 			rc = dlpar_remove_lmb(&lmbs[i]);
625 				rc = dlpar_add_lmb(&lmbs[i]);
627 					dlpar_release_drc(lmbs[i].drc_index);
637 		pr_info("Failed to update memory at %llx\n",
640 		pr_info("Memory at %llx was updated\n", lmbs[i].base_addr);
/*
 * dlpar_memory_remove_by_ic - hot-remove a contiguous run of
 * @lmbs_to_remove LMBs starting at the LMB with @drc_index
 * (indexed-count form of the request).
 *
 * Locates the start LMB, validates the [start, start+count) window
 * (reserved LMBs disqualify the request), removes each assigned LMB in
 * the window marking successes with the 'reserved' scratch bit, then
 * either rolls back (re-adding removed LMBs) on failure or releases the
 * DRCs on success. Same reserve/rollback pattern as
 * dlpar_memory_remove_by_count().
 */
645 static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index,
646 				     struct property *prop)
648 	struct of_drconf_cell *lmbs;
650 	int i, rc, start_lmb_found;
651 	int lmbs_available = 0, start_index = 0, end_index;
653 	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
654 		lmbs_to_remove, drc_index);
656 	if (lmbs_to_remove == 0)
661 	lmbs = (struct of_drconf_cell *)p;
664 	/* Navigate to drc_index */
665 	while (start_index < num_lmbs) {
666 		if (lmbs[start_index].drc_index == drc_index) {
674 	if (!start_lmb_found)
677 	end_index = start_index + lmbs_to_remove;
679 	/* Validate that there are enough LMBs to satisfy the request */
680 	for (i = start_index; i < end_index; i++) {
	/* A reserved LMB in the window makes the whole request invalid. */
681 		if (lmbs[i].flags & DRCONF_MEM_RESERVED)
687 	if (lmbs_available < lmbs_to_remove)
690 	for (i = start_index; i < end_index; i++) {
691 		if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
694 		rc = dlpar_remove_lmb(&lmbs[i]);
698 		lmbs[i].reserved = 1;
702 		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");
	/* Roll back the LMBs removed so far. */
704 		for (i = start_index; i < end_index; i++) {
705 			if (!lmbs[i].reserved)
708 			rc = dlpar_add_lmb(&lmbs[i]);
710 				pr_err("Failed to add LMB, drc index %x\n",
711 				       be32_to_cpu(lmbs[i].drc_index));
713 			lmbs[i].reserved = 0;
	/* Commit: release DRCs for the removed window. */
717 		for (i = start_index; i < end_index; i++) {
718 			if (!lmbs[i].reserved)
721 			dlpar_release_drc(lmbs[i].drc_index);
722 			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
723 				lmbs[i].base_addr, lmbs[i].drc_index);
725 			lmbs[i].reserved = 0;
/*
 * Stubs used when CONFIG_MEMORY_HOTREMOVE is disabled: all remove
 * operations become no-ops/errors so the add-only paths still build.
 * (Return statements not visible in this extract.)
 */
733 static inline int pseries_remove_memblock(unsigned long base,
734 					  unsigned int memblock_size)
738 static inline int pseries_remove_mem_node(struct device_node *np)
742 static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
746 static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
750 static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
751 					struct property *prop)
755 static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
759 static int dlpar_memory_readd_by_index(u32 drc_index, struct property *prop)
764 static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index,
765 				     struct property *prop)
769 #endif /* CONFIG_MEMORY_HOTREMOVE */
/*
 * dlpar_add_lmb - hot-add a single LMB.
 *
 * Skips LMBs already assigned. Sequence: update the device tree
 * (releasing the DRC if that fails), add the memory to the kernel via
 * __add_memory() on the LMB's node, then online it. Each later failure
 * unwinds the device tree update; the online failure also calls
 * remove_memory(). On full success DRCONF_MEM_ASSIGNED is set.
 * Intermediate error returns are missing from this extract.
 */
771 static int dlpar_add_lmb(struct of_drconf_cell *lmb)
773 	unsigned long block_sz;
776 	if (lmb->flags & DRCONF_MEM_ASSIGNED)
779 	rc = dlpar_add_device_tree_lmb(lmb);
781 		pr_err("Couldn't update device tree for drc index %x\n",
783 		dlpar_release_drc(lmb->drc_index);
787 	block_sz = memory_block_size_bytes();
789 	/* Find the node id for this address */
790 	nid = memory_add_physaddr_to_nid(lmb->base_addr);
793 	rc = __add_memory(nid, lmb->base_addr, block_sz);
795 		dlpar_remove_device_tree_lmb(lmb);
799 	rc = dlpar_online_lmb(lmb);
	/* Online failed: tear the memory back down and unwind the DT. */
801 		remove_memory(nid, lmb->base_addr, block_sz);
802 		dlpar_remove_device_tree_lmb(lmb);
804 		lmb->flags |= DRCONF_MEM_ASSIGNED;
/*
 * dlpar_memory_add_by_count - hot-add @lmbs_to_add LMBs, choosing any
 * unassigned ones from the drconf list in @prop.
 *
 * Counts unassigned LMBs up front and fails if fewer than requested.
 * For each candidate: acquire its DRC, add the LMB (releasing the DRC on
 * failure), and mark success with the 'reserved' scratch bit. On partial
 * failure every reserved LMB is removed again and its DRC released; on
 * success the additions are logged and the scratch bits cleared. Mirror
 * image of dlpar_memory_remove_by_count().
 */
810 static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
812 	struct of_drconf_cell *lmbs;
814 	int lmbs_available = 0;
818 	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);
820 	if (lmbs_to_add == 0)
825 	lmbs = (struct of_drconf_cell *)p;
827 	/* Validate that there are enough LMBs to satisfy the request */
828 	for (i = 0; i < num_lmbs; i++) {
829 		if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
833 	if (lmbs_available < lmbs_to_add)
836 	for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) {
837 		if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
840 		rc = dlpar_acquire_drc(lmbs[i].drc_index);
844 		rc = dlpar_add_lmb(&lmbs[i]);
846 			dlpar_release_drc(lmbs[i].drc_index);
852 		/* Mark this lmb so we can remove it later if all of the
853 		 * requested LMBs cannot be added.
		 */
855 		lmbs[i].reserved = 1;
858 	if (lmbs_added != lmbs_to_add) {
859 		pr_err("Memory hot-add failed, removing any added LMBs\n");
	/* Roll back: remove everything we managed to add. */
861 		for (i = 0; i < num_lmbs; i++) {
862 			if (!lmbs[i].reserved)
865 			rc = dlpar_remove_lmb(&lmbs[i]);
867 				pr_err("Failed to remove LMB, drc index %x\n",
868 				       be32_to_cpu(lmbs[i].drc_index));
870 				dlpar_release_drc(lmbs[i].drc_index);
874 		for (i = 0; i < num_lmbs; i++) {
875 			if (!lmbs[i].reserved)
878 			pr_info("Memory at %llx (drc index %x) was hot-added\n",
879 				lmbs[i].base_addr, lmbs[i].drc_index);
880 			lmbs[i].reserved = 0;
/*
 * dlpar_memory_add_by_index - hot-add the one LMB whose drc index
 * matches @drc_index: acquire its DRC, add the LMB, and release the DRC
 * if the add fails. Logs the outcome. Not-found handling and the return
 * are missing from this extract.
 */
888 static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop)
890 	struct of_drconf_cell *lmbs;
895 	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);
899 	lmbs = (struct of_drconf_cell *)p;
902 	for (i = 0; i < num_lmbs; i++) {
903 		if (lmbs[i].drc_index == drc_index) {
905 			rc = dlpar_acquire_drc(lmbs[i].drc_index);
907 				rc = dlpar_add_lmb(&lmbs[i]);
909 					dlpar_release_drc(lmbs[i].drc_index);
920 		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
922 		pr_info("Memory at %llx (drc index %x) was hot-added\n",
923 			lmbs[i].base_addr, drc_index);
/*
 * dlpar_memory_add_by_ic - hot-add a contiguous run of @lmbs_to_add LMBs
 * starting at the LMB with @drc_index (indexed-count form).
 *
 * Locates the start LMB, validates that no LMB in [start, start+count)
 * is reserved, then acquires each DRC and adds each unassigned LMB,
 * marking successes with the 'reserved' scratch bit. On partial failure
 * the added LMBs are removed and their DRCs released; on success the
 * additions are logged. Mirror image of dlpar_memory_remove_by_ic().
 */
928 static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index,
929 				  struct property *prop)
931 	struct of_drconf_cell *lmbs;
933 	int i, rc, start_lmb_found;
934 	int lmbs_available = 0, start_index = 0, end_index;
936 	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
937 		lmbs_to_add, drc_index);
939 	if (lmbs_to_add == 0)
944 	lmbs = (struct of_drconf_cell *)p;
947 	/* Navigate to drc_index */
948 	while (start_index < num_lmbs) {
949 		if (lmbs[start_index].drc_index == drc_index) {
957 	if (!start_lmb_found)
960 	end_index = start_index + lmbs_to_add;
962 	/* Validate that the LMBs in this range are not reserved */
963 	for (i = start_index; i < end_index; i++) {
964 		if (lmbs[i].flags & DRCONF_MEM_RESERVED)
970 	if (lmbs_available < lmbs_to_add)
973 	for (i = start_index; i < end_index; i++) {
974 		if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
977 		rc = dlpar_acquire_drc(lmbs[i].drc_index);
981 		rc = dlpar_add_lmb(&lmbs[i]);
983 			dlpar_release_drc(lmbs[i].drc_index);
987 		lmbs[i].reserved = 1;
991 		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");
	/* Roll back the LMBs added so far. */
993 		for (i = start_index; i < end_index; i++) {
994 			if (!lmbs[i].reserved)
997 			rc = dlpar_remove_lmb(&lmbs[i]);
999 				pr_err("Failed to remove LMB, drc index %x\n",
1000 				       be32_to_cpu(lmbs[i].drc_index));
1002 				dlpar_release_drc(lmbs[i].drc_index);
1006 		for (i = start_index; i < end_index; i++) {
1007 			if (!lmbs[i].reserved)
1010 			pr_info("Memory at %llx (drc index %x) was hot-added\n",
1011 				lmbs[i].base_addr, lmbs[i].drc_index);
1012 			lmbs[i].reserved = 0;
/*
 * dlpar_memory - top-level dispatcher for a memory DLPAR hotplug request.
 *
 * @hp_elog: decoded hotplug error-log entry carrying the action
 *           (add/remove/readd) and the target id (count, drc index, or
 *           indexed-count pair).
 *
 * Takes the device hotplug lock, clones the "ibm,dynamic-memory"
 * property (CPU-endian working copy shared by all handlers), dispatches
 * on action + id_type, then frees the clone and unlocks. Invalid actions
 * are logged. Some error-return and break lines are missing from this
 * extract.
 */
1019 int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
1021 	struct device_node *dn;
1022 	struct property *prop;
1023 	u32 count, drc_index;
1026 	lock_device_hotplug();
1028 	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1031 		goto dlpar_memory_out;
1034 	prop = dlpar_clone_drconf_property(dn);
1037 		goto dlpar_memory_out;
1040 	switch (hp_elog->action) {
1041 	case PSERIES_HP_ELOG_ACTION_ADD:
1042 		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
1043 			count = hp_elog->_drc_u.drc_count;
1044 			rc = dlpar_memory_add_by_count(count, prop);
1045 		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
1046 			drc_index = hp_elog->_drc_u.drc_index;
1047 			rc = dlpar_memory_add_by_index(drc_index, prop);
1048 		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
1049 			count = hp_elog->_drc_u.ic.count;
1050 			drc_index = hp_elog->_drc_u.ic.index;
1051 			rc = dlpar_memory_add_by_ic(count, drc_index, prop);
1057 	case PSERIES_HP_ELOG_ACTION_REMOVE:
1058 		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
1059 			count = hp_elog->_drc_u.drc_count;
1060 			rc = dlpar_memory_remove_by_count(count, prop);
1061 		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
1062 			drc_index = hp_elog->_drc_u.drc_index;
1063 			rc = dlpar_memory_remove_by_index(drc_index, prop);
1064 		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
1065 			count = hp_elog->_drc_u.ic.count;
1066 			drc_index = hp_elog->_drc_u.ic.index;
1067 			rc = dlpar_memory_remove_by_ic(count, drc_index, prop);
1073 	case PSERIES_HP_ELOG_ACTION_READD:
1074 		drc_index = hp_elog->_drc_u.drc_index;
1075 		rc = dlpar_memory_readd_by_index(drc_index, prop);
1078 		pr_err("Invalid action (%d) specified\n", hp_elog->action);
1083 	dlpar_free_property(prop);
1087 	unlock_device_hotplug();
/*
 * pseries_add_mem_node - handle attach of a /memory device tree node.
 *
 * Ignores nodes whose device_type is not "memory". Reads the base
 * address (first 64-bit cell of "reg") and LMB size (regs[3]) and
 * registers the range with the memblock allocator, normalizing the
 * memblock_add() result to 0/-EINVAL. Declarations of 'regs'/'base'
 * and early returns are not visible in this extract.
 */
1091 static int pseries_add_mem_node(struct device_node *np)
1096 	unsigned int lmb_size;
1100 	 * Check to see if we are actually adding memory
	 */
1102 	type = of_get_property(np, "device_type", NULL);
1103 	if (type == NULL || strcmp(type, "memory") != 0)
1107 	 * Find the base and size of the memblock
	 */
1109 	regs = of_get_property(np, "reg", NULL);
1113 	base = be64_to_cpu(*(unsigned long *)regs);
1114 	lmb_size = be32_to_cpu(regs[3]);
1117 	 * Update memory region to represent the memory add
	 */
1119 	ret = memblock_add(base, lmb_size);
1120 	return (ret < 0) ? -EINVAL : 0;
/*
 * pseries_update_drconf_memory - react to an external update of the
 * "ibm,dynamic-memory" property.
 *
 * Diffs the old and new drconf entries (raw big-endian values here,
 * hence the be32/be64 conversions): an LMB that lost DRCONF_MEM_ASSIGNED
 * has its memory removed; one that gained it is added to the memblock
 * allocator. Guard clauses (e.g. the rtas_hp_event early-out) and the
 * final return are missing from this extract.
 */
1123 static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
1125 	struct of_drconf_cell *new_drmem, *old_drmem;
1126 	unsigned long memblock_size;
1129 	int i, rc = -EINVAL;
1134 	memblock_size = pseries_memory_block_size();
1138 	p = (__be32 *) pr->old_prop->value;
1142 	/* The first int of the property is the number of lmb's described
1143 	 * by the property. This is followed by an array of of_drconf_cell
1144 	 * entries. Get the number of entries and skip to the array of
1147 	entries = be32_to_cpu(*p++);
1148 	old_drmem = (struct of_drconf_cell *)p;
1150 	p = (__be32 *)pr->prop->value;
1152 	new_drmem = (struct of_drconf_cell *)p;
1154 	for (i = 0; i < entries; i++) {
	/* Assigned -> unassigned: memory went away. */
1155 		if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
1156 		    (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
1157 			rc = pseries_remove_memblock(
1158 				be64_to_cpu(old_drmem[i].base_addr),
	/* Unassigned -> assigned: new memory appeared. */
1161 		} else if ((!(be32_to_cpu(old_drmem[i].flags) &
1162 			    DRCONF_MEM_ASSIGNED)) &&
1163 			    (be32_to_cpu(new_drmem[i].flags) &
1164 			    DRCONF_MEM_ASSIGNED)) {
1165 			rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
1167 			rc = (rc < 0) ? -EINVAL : 0;
/*
 * pseries_memory_notifier - OF reconfig notifier callback.
 *
 * Routes device tree reconfiguration events: node attach/detach go to
 * pseries_add_mem_node()/pseries_remove_mem_node(); updates of the
 * "ibm,dynamic-memory" property go to pseries_update_drconf_memory().
 * Any handler error is converted to notifier format. The switch header
 * and break statements are missing from this extract.
 */
1174 static int pseries_memory_notifier(struct notifier_block *nb,
1175 				   unsigned long action, void *data)
1177 	struct of_reconfig_data *rd = data;
1181 	case OF_RECONFIG_ATTACH_NODE:
1182 		err = pseries_add_mem_node(rd->dn);
1184 	case OF_RECONFIG_DETACH_NODE:
1185 		err = pseries_remove_mem_node(rd->dn);
1187 	case OF_RECONFIG_UPDATE_PROPERTY:
1188 		if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
1189 			err = pseries_update_drconf_memory(rd);
1192 	return notifier_from_errno(err);
/* Notifier block registered with the OF reconfig chain (see init below). */
1195 static struct notifier_block pseries_mem_nb = {
1196 	.notifier_call = pseries_memory_notifier,
/*
 * pseries_memory_hotplug_init - register the OF reconfig notifier on
 * LPAR firmware so device tree memory changes are tracked. Runs as a
 * pseries machine device initcall.
 */
1199 static int __init pseries_memory_hotplug_init(void)
1201 	if (firmware_has_feature(FW_FEATURE_LPAR))
1202 		of_reconfig_notifier_register(&pseries_mem_nb);
1206 machine_device_initcall(pseries, pseries_memory_hotplug_init);