1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * pseries Memory Hotplug infrastructure.
5 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
8 #define pr_fmt(fmt) "pseries-hotplug-mem: " fmt
11 #include <linux/of_address.h>
12 #include <linux/memblock.h>
13 #include <linux/memory.h>
14 #include <linux/memory_hotplug.h>
15 #include <linux/slab.h>
17 #include <asm/firmware.h>
18 #include <asm/machdep.h>
19 #include <asm/sparsemem.h>
20 #include <asm/fadump.h>
21 #include <asm/drmem.h>
/*
 * pseries_memory_block_size - return the size in bytes of one
 * hotpluggable memory block (LMB).
 *
 * Preferred source: the "ibm,lmb-size" property of the
 * /ibm,dynamic-reconfiguration-memory node.  On pseries machines that
 * lack that node, fall back to sizing the memory node that immediately
 * follows /memory@0.  Defaults to MIN_MEMORY_BLOCK_SIZE.
 */
24 unsigned long pseries_memory_block_size(void)
26 	struct device_node *np;
27 	u64 memblock_size = MIN_MEMORY_BLOCK_SIZE;
30 	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
36 		size_cells = of_n_size_cells(np);
/* ibm,lmb-size is size_cells 32-bit cells wide; reject a short property */
38 		prop = of_get_property(np, "ibm,lmb-size", &len);
39 		if (prop && len >= size_cells * sizeof(__be32))
40 			memblock_size = of_read_number(prop, size_cells);
43 	} else if (machine_is(pseries)) {
44 		/* This fallback really only applies to pseries */
45 		unsigned int memzero_size = 0;
47 		np = of_find_node_by_path("/memory@0");
49 		if (!of_address_to_resource(np, 0, &r))
50 			memzero_size = resource_size(&r);
55 			/* We now know the size of memory@0, use this to find
56 			 * the first memoryblock and get its size.
/* The node after memory@0 starts at address memzero_size, hence the path */
60 			sprintf(buf, "/memory@%x", memzero_size);
61 			np = of_find_node_by_path(buf);
63 			if (!of_address_to_resource(np, 0, &r))
64 				memblock_size = resource_size(&r);
/*
 * dlpar_free_property - release a dynamically allocated property.
 * Used by the dlpar_clone_property() error path on a partially built
 * property.  NOTE(review): body not visible in this chunk; presumably
 * frees name, value and the property struct itself -- confirm.
 */
72 static void dlpar_free_property(struct property *prop)
/*
 * dlpar_clone_property - duplicate @prop into a freshly allocated
 * property whose value buffer is @prop_size bytes.
 *
 * The value buffer is zero-allocated and only prop->length bytes are
 * copied in, so when prop_size exceeds the original length the tail is
 * zero-filled (callers use this to grow a property before appending).
 * The clone is flagged OF_DYNAMIC so the OF core knows it was
 * kmalloc'ed.  Returns NULL on allocation failure (error path frees any
 * partial allocation via dlpar_free_property()).
 */
79 static struct property *dlpar_clone_property(struct property *prop,
82 	struct property *new_prop;
84 	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
88 	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
89 	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
90 	if (!new_prop->name || !new_prop->value) {
91 		dlpar_free_property(new_prop);
95 	memcpy(new_prop->value, prop->value, prop->length);
96 	new_prop->length = prop_size;
98 	of_property_set_flag(new_prop, OF_DYNAMIC);
/*
 * find_aa_index - find (or create) the associativity-lookup-array index
 * matching @lmb_assoc, returning it through @aa_index.
 *
 * Searches the existing arrays in @ala_prop; if no match exists, clones
 * the property with room for one more array, appends the new
 * associativity, and updates @dr_node with the grown property.
 */
102 static bool find_aa_index(struct device_node *dr_node,
103 			  struct property *ala_prop,
104 			  const u32 *lmb_assoc, u32 *aa_index)
106 	u32 *assoc_arrays, new_prop_size;
107 	struct property *new_prop;
108 	int aa_arrays, aa_array_entries, aa_array_sz;
112 	 * The ibm,associativity-lookup-arrays property is defined to be
113 	 * a 32-bit value specifying the number of associativity arrays
114 	 * followed by a 32-bitvalue specifying the number of entries per
115 	 * array, followed by the associativity arrays.
117 	assoc_arrays = ala_prop->value;
119 	aa_arrays = be32_to_cpu(assoc_arrays[0]);
120 	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
121 	aa_array_sz = aa_array_entries * sizeof(u32);
/* Compare each existing array; +2 skips the two header words above */
123 	for (i = 0; i < aa_arrays; i++) {
124 		index = (i * aa_array_entries) + 2;
/* lmb_assoc[0] is the associativity list length, so compare from [1] */
126 		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
/* No match found: grow the property by one array and append ours */
133 	new_prop_size = ala_prop->length + aa_array_sz;
134 	new_prop = dlpar_clone_property(ala_prop, new_prop_size);
138 	assoc_arrays = new_prop->value;
140 	/* increment the number of entries in the lookup array */
141 	assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);
143 	/* copy the new associativity into the lookup array */
144 	index = aa_arrays * aa_array_entries + 2;
145 	memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);
147 	of_update_property(dr_node, new_prop);
150 	 * The associativity lookup array index for this lmb is
151 	 * number of entries - 1 since we added its associativity
152 	 * to the end of the lookup array.
154 	*aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
/*
 * update_lmb_associativity_index - set lmb->aa_index from firmware data.
 *
 * Configures the device-tree connector for the LMB's DRC index to read
 * its "ibm,associativity" property, updates NUMA distance information,
 * then resolves that associativity against the
 * "ibm,associativity-lookup-arrays" property of
 * /ibm,dynamic-reconfiguration-memory via find_aa_index().  The
 * temporary configure-connector nodes are freed on every path.
 */
158 static int update_lmb_associativity_index(struct drmem_lmb *lmb)
160 	struct device_node *parent, *lmb_node, *dr_node;
161 	struct property *ala_prop;
162 	const u32 *lmb_assoc;
166 	parent = of_find_node_by_path("/");
/* DRC index must be big-endian for the configure-connector RTAS call */
170 	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
176 	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
178 		dlpar_free_cc_nodes(lmb_node);
182 	update_numa_distance(lmb_node);
184 	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
186 		dlpar_free_cc_nodes(lmb_node);
190 	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
193 		of_node_put(dr_node);
194 		dlpar_free_cc_nodes(lmb_node);
198 	found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);
200 	of_node_put(dr_node);
201 	dlpar_free_cc_nodes(lmb_node);
204 		pr_err("Could not find LMB associativity\n");
208 	lmb->aa_index = aa_index;
/*
 * lmb_to_memblock - look up the memory_block device backing @lmb.
 *
 * Converts the LMB base address to a mem-section number and finds the
 * corresponding memory block.  NOTE(review): find_memory_block() takes
 * a device reference; callers are expected to put_device() it.
 */
212 static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
214 	unsigned long section_nr;
215 	struct memory_block *mem_block;
217 	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
219 	mem_block = find_memory_block(section_nr);
/*
 * get_lmb_range - locate a contiguous run of @n_lmbs LMBs beginning at
 * the LMB whose DRC index is @drc_index.
 *
 * On success *start_lmb/*end_lmb bound the half-open range
 * [start, start + n_lmbs).  The range is validated against the end of
 * the drmem LMB array so callers cannot iterate past it.
 */
223 static int get_lmb_range(u32 drc_index, int n_lmbs,
224 			 struct drmem_lmb **start_lmb,
225 			 struct drmem_lmb **end_lmb)
227 	struct drmem_lmb *lmb, *start, *end;
228 	struct drmem_lmb *limit;
231 	for_each_drmem_lmb(lmb) {
232 		if (lmb->drc_index == drc_index) {
241 	end = &start[n_lmbs];
243 	limit = &drmem_info->lmbs[drmem_info->n_lmbs];
/*
 * dlpar_change_lmb_state - online or offline the memory block backing
 * @lmb, according to @online.
 *
 * A no-op (beyond the lookup) when the block is already in the
 * requested state.  Drops the device reference obtained through
 * lmb_to_memblock() before returning.
 */
252 static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
254 	struct memory_block *mem_block;
257 	mem_block = lmb_to_memblock(lmb);
261 	if (online && mem_block->dev.offline)
262 		rc = device_online(&mem_block->dev);
263 	else if (!online && !mem_block->dev.offline)
264 		rc = device_offline(&mem_block->dev);
268 	put_device(&mem_block->dev);
/* Convenience wrapper: bring the LMB's memory block online. */
273 static int dlpar_online_lmb(struct drmem_lmb *lmb)
275 	return dlpar_change_lmb_state(lmb, true);
278 #ifdef CONFIG_MEMORY_HOTREMOVE
/* Convenience wrapper: take the LMB's memory block offline. */
279 static int dlpar_offline_lmb(struct drmem_lmb *lmb)
281 	return dlpar_change_lmb_state(lmb, false);
/*
 * pseries_remove_memblock - remove @memblock_size bytes of memory at
 * @base, one MIN_MEMORY_BLOCK_SIZE section at a time.
 *
 * Holds the device-hotplug lock across the removal, then drops the
 * range from the memblock allocator's view.  NOTE(review): base is
 * advanced inside the loop, so the memblock_remove() visible here
 * operates on the post-loop value -- the original removal target is
 * presumably captured on a line not visible in this chunk; confirm.
 */
284 static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size)
286 	unsigned long block_sz, start_pfn;
287 	int sections_per_block;
290 	start_pfn = base >> PAGE_SHIFT;
292 	lock_device_hotplug();
294 	if (!pfn_valid(start_pfn))
297 	block_sz = pseries_memory_block_size();
298 	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
300 	for (i = 0; i < sections_per_block; i++) {
301 		__remove_memory(base, MIN_MEMORY_BLOCK_SIZE);
302 		base += MIN_MEMORY_BLOCK_SIZE;
306 	/* Update memory regions for memory remove */
307 	memblock_remove(base, memblock_size);
308 	unlock_device_hotplug();
/*
 * pseries_remove_mem_node - handle OF detach of a memory node by
 * removing the memory range it describes.
 *
 * Non-"memory" nodes are ignored; the base/size come from the node's
 * first address resource.
 */
312 static int pseries_remove_mem_node(struct device_node *np)
318 	 * Check to see if we are actually removing memory
320 	if (!of_node_is_type(np, "memory"))
324 	 * Find the base address and size of the memblock
326 	ret = of_address_to_resource(np, 0, &res);
330 	pseries_remove_memblock(res.start, resource_size(&res));
/*
 * lmb_is_removable - cheap eligibility check for hot-removing @lmb.
 *
 * Rejects reserved or unassigned LMBs, and (with CONFIG_FA_DUMP) LMBs
 * overlapping the fadump boot-memory/preserved area.  This is only a
 * pre-filter; the authoritative answer comes from device_offline().
 */
334 static bool lmb_is_removable(struct drmem_lmb *lmb)
336 	if ((lmb->flags & DRCONF_MEM_RESERVED) ||
337 	    !(lmb->flags & DRCONF_MEM_ASSIGNED))
340 #ifdef CONFIG_FA_DUMP
342 	 * Don't hot-remove memory that falls in fadump boot memory area
343 	 * and memory that is reserved for capturing old kernel memory.
345 	if (is_fadump_memory_area(lmb->base_addr, memory_block_size_bytes()))
348 	/* device_offline() will determine if we can actually remove this lmb */
/* Forward declaration: the remove paths re-add LMBs on partial failure. */
352 static int dlpar_add_lmb(struct drmem_lmb *);
/*
 * dlpar_remove_lmb - fully hot-remove a single LMB.
 *
 * Offlines the backing memory block, removes the memory from the
 * kernel and the memblock allocator, invalidates the LMB's
 * associativity index, and clears DRCONF_MEM_ASSIGNED.  The memory
 * block device reference is dropped on both the failure and success
 * paths.
 */
354 static int dlpar_remove_lmb(struct drmem_lmb *lmb)
356 	struct memory_block *mem_block;
357 	unsigned long block_sz;
360 	if (!lmb_is_removable(lmb))
363 	mem_block = lmb_to_memblock(lmb);
364 	if (mem_block == NULL)
367 	rc = dlpar_offline_lmb(lmb);
369 		put_device(&mem_block->dev);
373 	block_sz = pseries_memory_block_size();
375 	__remove_memory(lmb->base_addr, block_sz);
376 	put_device(&mem_block->dev);
378 	/* Update memory regions for memory remove */
379 	memblock_remove(lmb->base_addr, block_sz);
381 	invalidate_lmb_associativity_index(lmb);
382 	lmb->flags &= ~DRCONF_MEM_ASSIGNED;
/*
 * dlpar_memory_remove_by_count - hot-remove @lmbs_to_remove LMBs,
 * chosen from anywhere in the drmem LMB list.
 *
 * All-or-nothing: first verifies enough removable LMBs exist, then
 * removes and reserves them one by one.  If the full count cannot be
 * removed, every reserved LMB is added back; otherwise the DRCs of the
 * removed LMBs are released.
 */
387 static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
389 	struct drmem_lmb *lmb;
390 	int lmbs_reserved = 0;
391 	int lmbs_available = 0;
394 	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);
396 	if (lmbs_to_remove == 0)
399 	/* Validate that there are enough LMBs to satisfy the request */
400 	for_each_drmem_lmb(lmb) {
401 		if (lmb_is_removable(lmb))
404 		if (lmbs_available == lmbs_to_remove)
408 	if (lmbs_available < lmbs_to_remove) {
409 		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
410 			lmbs_available, lmbs_to_remove);
414 	for_each_drmem_lmb(lmb) {
415 		rc = dlpar_remove_lmb(lmb);
419 		/* Mark this lmb so we can add it later if all of the
420 		 * requested LMBs cannot be removed.
422 		drmem_mark_lmb_reserved(lmb);
425 		if (lmbs_reserved == lmbs_to_remove)
/* Partial failure: roll back by re-adding everything we removed */
429 	if (lmbs_reserved != lmbs_to_remove) {
430 		pr_err("Memory hot-remove failed, adding LMB's back\n");
432 		for_each_drmem_lmb(lmb) {
433 			if (!drmem_lmb_reserved(lmb))
436 			rc = dlpar_add_lmb(lmb);
438 				pr_err("Failed to add LMB back, drc index %x\n",
441 			drmem_remove_lmb_reservation(lmb);
444 			if (lmbs_reserved == 0)
/* Success: release the DRCs for all removed LMBs and clear reservations */
450 		for_each_drmem_lmb(lmb) {
451 			if (!drmem_lmb_reserved(lmb))
454 			dlpar_release_drc(lmb->drc_index);
455 			pr_info("Memory at %llx was hot-removed\n",
458 			drmem_remove_lmb_reservation(lmb);
461 			if (lmbs_reserved == 0)
/*
 * dlpar_memory_remove_by_index - hot-remove the single LMB identified
 * by @drc_index, releasing its DRC on success.
 */
470 static int dlpar_memory_remove_by_index(u32 drc_index)
472 	struct drmem_lmb *lmb;
476 	pr_debug("Attempting to hot-remove LMB, drc index %x\n", drc_index);
479 	for_each_drmem_lmb(lmb) {
480 		if (lmb->drc_index == drc_index) {
482 			rc = dlpar_remove_lmb(lmb);
484 				dlpar_release_drc(lmb->drc_index);
494 		pr_debug("Failed to hot-remove memory at %llx\n",
497 		pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
/*
 * dlpar_memory_remove_by_ic - hot-remove a contiguous range of
 * @lmbs_to_remove LMBs starting at DRC index @drc_index.
 *
 * Unlike remove_by_count(), already-unassigned LMBs in the range are
 * tolerated (treated as no-ops).  On partial failure, removed LMBs are
 * re-added and the hypervisor is hinted via dlpar_unisolate_drc().
 */
502 static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
504 	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
507 	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
508 		lmbs_to_remove, drc_index);
510 	if (lmbs_to_remove == 0)
513 	rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
518 	 * Validate that all LMBs in range are not reserved. Note that it
519 	 * is ok if they are !ASSIGNED since our goal here is to remove the
520 	 * LMB range, regardless of whether some LMBs were already removed
521 	 * by any other reason.
523 	 * This is a contrast to what is done in remove_by_count() where we
524 	 * check for both RESERVED and !ASSIGNED (via lmb_is_removable()),
525 	 * because we want to remove a fixed amount of LMBs in that function.
527 	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
528 		if (lmb->flags & DRCONF_MEM_RESERVED) {
529 			pr_err("Memory at %llx (drc index %x) is reserved\n",
530 				lmb->base_addr, lmb->drc_index);
535 	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
537 		 * dlpar_remove_lmb() will error out if the LMB is already
538 		 * !ASSIGNED, but this case is a no-op for us.
540 		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
543 		rc = dlpar_remove_lmb(lmb);
547 		drmem_mark_lmb_reserved(lmb);
/* Partial failure: re-add everything removed so far */
551 		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");
554 		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
555 			if (!drmem_lmb_reserved(lmb))
559 			 * Setting the isolation state of an UNISOLATED/CONFIGURED
560 			 * device to UNISOLATE is a no-op, but the hypervisor can
561 			 * use it as a hint that the LMB removal failed.
563 			dlpar_unisolate_drc(lmb->drc_index);
565 			rc = dlpar_add_lmb(lmb);
567 				pr_err("Failed to add LMB, drc index %x\n",
570 			drmem_remove_lmb_reservation(lmb);
/* Success: release DRCs for the whole removed range */
574 		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
575 			if (!drmem_lmb_reserved(lmb))
578 			dlpar_release_drc(lmb->drc_index);
579 			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
580 				lmb->base_addr, lmb->drc_index);
582 			drmem_remove_lmb_reservation(lmb);
/*
 * !CONFIG_MEMORY_HOTREMOVE stubs: hot-remove requests are not
 * supported, so these mirror the real functions' signatures and
 * (on lines not visible here) presumably return an error.
 */
590 static inline int pseries_remove_memblock(unsigned long base,
591 					  unsigned long memblock_size)
595 static inline int pseries_remove_mem_node(struct device_node *np)
599 static int dlpar_remove_lmb(struct drmem_lmb *lmb)
603 static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
607 static int dlpar_memory_remove_by_index(u32 drc_index)
612 static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
616 #endif /* CONFIG_MEMORY_HOTREMOVE */
/*
 * dlpar_add_lmb - fully hot-add a single LMB.
 *
 * Resolves the LMB's associativity index (releasing the DRC on
 * failure), adds the memory to the chosen NUMA node, onlines it, and
 * sets DRCONF_MEM_ASSIGNED.  If onlining fails the memory is removed
 * again and the associativity index invalidated.  Already-assigned
 * LMBs are rejected up front.
 */
618 static int dlpar_add_lmb(struct drmem_lmb *lmb)
620 	unsigned long block_sz;
623 	if (lmb->flags & DRCONF_MEM_ASSIGNED)
626 	rc = update_lmb_associativity_index(lmb);
628 		dlpar_release_drc(lmb->drc_index);
632 	block_sz = memory_block_size_bytes();
634 	/* Find the node id for this LMB. Fake one if necessary. */
635 	nid = of_drconf_to_nid_single(lmb);
636 	if (nid < 0 || !node_possible(nid))
637 		nid = first_online_node;
640 	rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_NONE);
642 		invalidate_lmb_associativity_index(lmb);
646 	rc = dlpar_online_lmb(lmb);
/* Onlining failed: undo the add so we do not leak offline memory */
648 		__remove_memory(lmb->base_addr, block_sz);
649 		invalidate_lmb_associativity_index(lmb);
651 		lmb->flags |= DRCONF_MEM_ASSIGNED;
/*
 * dlpar_memory_add_by_count - hot-add @lmbs_to_add LMBs, chosen from
 * anywhere in the drmem LMB list.
 *
 * All-or-nothing: first verifies enough unassigned, unreserved LMBs
 * exist, then acquires each DRC and adds the LMB.  If the full count
 * cannot be added, every reserved LMB is removed again and its DRC
 * released.
 */
657 static int dlpar_memory_add_by_count(u32 lmbs_to_add)
659 	struct drmem_lmb *lmb;
660 	int lmbs_available = 0;
661 	int lmbs_reserved = 0;
664 	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);
666 	if (lmbs_to_add == 0)
669 	/* Validate that there are enough LMBs to satisfy the request */
670 	for_each_drmem_lmb(lmb) {
671 		if (lmb->flags & DRCONF_MEM_RESERVED)
674 		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
677 		if (lmbs_available == lmbs_to_add)
681 	if (lmbs_available < lmbs_to_add)
684 	for_each_drmem_lmb(lmb) {
685 		if (lmb->flags & DRCONF_MEM_ASSIGNED)
688 		rc = dlpar_acquire_drc(lmb->drc_index);
692 		rc = dlpar_add_lmb(lmb);
694 			dlpar_release_drc(lmb->drc_index);
698 		/* Mark this lmb so we can remove it later if all of the
699 		 * requested LMBs cannot be added.
701 		drmem_mark_lmb_reserved(lmb);
703 		if (lmbs_reserved == lmbs_to_add)
/* Partial failure: remove everything added so far and release DRCs */
707 	if (lmbs_reserved != lmbs_to_add) {
708 		pr_err("Memory hot-add failed, removing any added LMBs\n");
710 		for_each_drmem_lmb(lmb) {
711 			if (!drmem_lmb_reserved(lmb))
714 			rc = dlpar_remove_lmb(lmb);
716 				pr_err("Failed to remove LMB, drc index %x\n",
719 				dlpar_release_drc(lmb->drc_index);
721 			drmem_remove_lmb_reservation(lmb);
724 			if (lmbs_reserved == 0)
/* Success: log each added LMB and clear its reservation */
729 		for_each_drmem_lmb(lmb) {
730 			if (!drmem_lmb_reserved(lmb))
733 			pr_debug("Memory at %llx (drc index %x) was hot-added\n",
734 				 lmb->base_addr, lmb->drc_index);
735 			drmem_remove_lmb_reservation(lmb);
738 			if (lmbs_reserved == 0)
/*
 * dlpar_memory_add_by_index - hot-add the single LMB identified by
 * @drc_index: acquire its DRC, add the LMB, and release the DRC again
 * if the add fails.
 */
747 static int dlpar_memory_add_by_index(u32 drc_index)
749 	struct drmem_lmb *lmb;
752 	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);
755 	for_each_drmem_lmb(lmb) {
756 		if (lmb->drc_index == drc_index) {
758 			rc = dlpar_acquire_drc(lmb->drc_index);
760 			rc = dlpar_add_lmb(lmb);
762 				dlpar_release_drc(lmb->drc_index);
773 		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
775 		pr_info("Memory at %llx (drc index %x) was hot-added\n",
776 			lmb->base_addr, drc_index);
/*
 * dlpar_memory_add_by_ic - hot-add a contiguous range of @lmbs_to_add
 * LMBs starting at DRC index @drc_index.
 *
 * Fails immediately if any LMB in the range is reserved;
 * already-assigned LMBs are skipped.  On partial failure, every LMB
 * added by this call is removed again and its DRC released.
 */
781 static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
783 	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
786 	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
787 		lmbs_to_add, drc_index);
789 	if (lmbs_to_add == 0)
792 	rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
796 	/* Validate that the LMBs in this range are not reserved */
797 	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
798 		/* Fail immediately if the whole range can't be hot-added */
799 		if (lmb->flags & DRCONF_MEM_RESERVED) {
800 			pr_err("Memory at %llx (drc index %x) is reserved\n",
801 					lmb->base_addr, lmb->drc_index);
806 	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
807 		if (lmb->flags & DRCONF_MEM_ASSIGNED)
810 		rc = dlpar_acquire_drc(lmb->drc_index);
814 		rc = dlpar_add_lmb(lmb);
816 			dlpar_release_drc(lmb->drc_index);
820 		drmem_mark_lmb_reserved(lmb);
/* Partial failure: remove every LMB added by this call */
824 		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");
826 		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
827 			if (!drmem_lmb_reserved(lmb))
830 			rc = dlpar_remove_lmb(lmb);
832 				pr_err("Failed to remove LMB, drc index %x\n",
835 				dlpar_release_drc(lmb->drc_index);
837 			drmem_remove_lmb_reservation(lmb);
/* Success: log each added LMB and clear its reservation */
841 		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
842 			if (!drmem_lmb_reserved(lmb))
845 			pr_info("Memory at %llx (drc index %x) was hot-added\n",
846 				lmb->base_addr, lmb->drc_index);
847 			drmem_remove_lmb_reservation(lmb);
/*
 * dlpar_memory - top-level dispatcher for a memory DLPAR hotplug error
 * log event.
 *
 * Takes the device-hotplug lock, routes ADD/REMOVE requests by id type
 * (count, DRC index, or indexed-count) to the matching handler, and
 * writes the updated dynamic memory layout back to the device tree via
 * drmem_update_dt() before unlocking.
 */
854 int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
856 	u32 count, drc_index;
859 	lock_device_hotplug();
861 	switch (hp_elog->action) {
862 	case PSERIES_HP_ELOG_ACTION_ADD:
863 		switch (hp_elog->id_type) {
864 		case PSERIES_HP_ELOG_ID_DRC_COUNT:
865 			count = hp_elog->_drc_u.drc_count;
866 			rc = dlpar_memory_add_by_count(count);
868 		case PSERIES_HP_ELOG_ID_DRC_INDEX:
869 			drc_index = hp_elog->_drc_u.drc_index;
870 			rc = dlpar_memory_add_by_index(drc_index);
872 		case PSERIES_HP_ELOG_ID_DRC_IC:
873 			count = hp_elog->_drc_u.ic.count;
874 			drc_index = hp_elog->_drc_u.ic.index;
875 			rc = dlpar_memory_add_by_ic(count, drc_index);
883 	case PSERIES_HP_ELOG_ACTION_REMOVE:
884 		switch (hp_elog->id_type) {
885 		case PSERIES_HP_ELOG_ID_DRC_COUNT:
886 			count = hp_elog->_drc_u.drc_count;
887 			rc = dlpar_memory_remove_by_count(count);
889 		case PSERIES_HP_ELOG_ID_DRC_INDEX:
890 			drc_index = hp_elog->_drc_u.drc_index;
891 			rc = dlpar_memory_remove_by_index(drc_index);
893 		case PSERIES_HP_ELOG_ID_DRC_IC:
894 			count = hp_elog->_drc_u.ic.count;
895 			drc_index = hp_elog->_drc_u.ic.index;
896 			rc = dlpar_memory_remove_by_ic(count, drc_index);
905 		pr_err("Invalid action (%d) specified\n", hp_elog->action);
/* Persist the new LMB state to the device tree before unlocking */
911 		rc = drmem_update_dt();
913 	unlock_device_hotplug();
/*
 * pseries_add_mem_node - handle OF attach of a memory node by adding
 * its range to the memblock allocator.
 *
 * Non-"memory" nodes are ignored.  Returns 0 on success, -EINVAL if
 * memblock_add() fails.
 */
917 static int pseries_add_mem_node(struct device_node *np)
923 	 * Check to see if we are actually adding memory
925 	if (!of_node_is_type(np, "memory"))
929 	 * Find the base and size of the memblock
931 	ret = of_address_to_resource(np, 0, &res);
936 	 * Update memory region to represent the memory add
938 	ret = memblock_add(res.start, resource_size(&res));
939 	return (ret < 0) ? -EINVAL : 0;
/*
 * pseries_memory_notifier - OF reconfiguration callback.
 *
 * Attach/detach of memory nodes adds/removes the described memory;
 * updates to properties of /ibm,dynamic-reconfiguration-memory refresh
 * the cached LMB list.  The error (if any) is translated to notifier
 * semantics via notifier_from_errno().
 */
942 static int pseries_memory_notifier(struct notifier_block *nb,
943 				   unsigned long action, void *data)
945 	struct of_reconfig_data *rd = data;
949 	case OF_RECONFIG_ATTACH_NODE:
950 		err = pseries_add_mem_node(rd->dn);
952 	case OF_RECONFIG_DETACH_NODE:
953 		err = pseries_remove_mem_node(rd->dn);
955 	case OF_RECONFIG_UPDATE_PROPERTY:
956 		if (!strcmp(rd->dn->name,
957 			    "ibm,dynamic-reconfiguration-memory"))
958 			drmem_update_lmbs(rd->prop);
960 	return notifier_from_errno(err);
/* Notifier block registered for OF reconfiguration events. */
963 static struct notifier_block pseries_mem_nb = {
964 	.notifier_call = pseries_memory_notifier,
/*
 * pseries_memory_hotplug_init - register the OF reconfiguration
 * notifier, but only when running under an LPAR (i.e. with PAPR
 * firmware services available).
 */
967 static int __init pseries_memory_hotplug_init(void)
969 	if (firmware_has_feature(FW_FEATURE_LPAR))
970 		of_reconfig_notifier_register(&pseries_mem_nb);
974 machine_device_initcall(pseries, pseries_memory_hotplug_init);