// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 */
8 #define pr_fmt(fmt) "pseries-hotplug-mem: " fmt
11 #include <linux/of_address.h>
12 #include <linux/memblock.h>
13 #include <linux/memory.h>
14 #include <linux/memory_hotplug.h>
15 #include <linux/slab.h>
17 #include <asm/firmware.h>
18 #include <asm/machdep.h>
19 #include <asm/sparsemem.h>
20 #include <asm/fadump.h>
21 #include <asm/drmem.h>
24 static void dlpar_free_property(struct property *prop)
31 static struct property *dlpar_clone_property(struct property *prop,
34 struct property *new_prop;
36 new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
40 new_prop->name = kstrdup(prop->name, GFP_KERNEL);
41 new_prop->value = kzalloc(prop_size, GFP_KERNEL);
42 if (!new_prop->name || !new_prop->value) {
43 dlpar_free_property(new_prop);
47 memcpy(new_prop->value, prop->value, prop->length);
48 new_prop->length = prop_size;
50 of_property_set_flag(new_prop, OF_DYNAMIC);
54 static bool find_aa_index(struct device_node *dr_node,
55 struct property *ala_prop,
56 const u32 *lmb_assoc, u32 *aa_index)
60 struct property *new_prop;
61 int aa_arrays, aa_array_entries, aa_array_sz;
65 * The ibm,associativity-lookup-arrays property is defined to be
66 * a 32-bit value specifying the number of associativity arrays
67 * followed by a 32-bitvalue specifying the number of entries per
68 * array, followed by the associativity arrays.
70 assoc_arrays = ala_prop->value;
72 aa_arrays = be32_to_cpu(assoc_arrays[0]);
73 aa_array_entries = be32_to_cpu(assoc_arrays[1]);
74 aa_array_sz = aa_array_entries * sizeof(u32);
76 for (i = 0; i < aa_arrays; i++) {
77 index = (i * aa_array_entries) + 2;
79 if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
86 new_prop_size = ala_prop->length + aa_array_sz;
87 new_prop = dlpar_clone_property(ala_prop, new_prop_size);
91 assoc_arrays = new_prop->value;
93 /* increment the number of entries in the lookup array */
94 assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);
96 /* copy the new associativity into the lookup array */
97 index = aa_arrays * aa_array_entries + 2;
98 memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);
100 of_update_property(dr_node, new_prop);
103 * The associativity lookup array index for this lmb is
104 * number of entries - 1 since we added its associativity
105 * to the end of the lookup array.
107 *aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
111 static int update_lmb_associativity_index(struct drmem_lmb *lmb)
113 struct device_node *parent, *lmb_node, *dr_node;
114 struct property *ala_prop;
115 const u32 *lmb_assoc;
119 parent = of_find_node_by_path("/");
123 lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
129 lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
131 dlpar_free_cc_nodes(lmb_node);
135 update_numa_distance(lmb_node);
137 dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
139 dlpar_free_cc_nodes(lmb_node);
143 ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
146 of_node_put(dr_node);
147 dlpar_free_cc_nodes(lmb_node);
151 found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);
153 of_node_put(dr_node);
154 dlpar_free_cc_nodes(lmb_node);
157 pr_err("Could not find LMB associativity\n");
161 lmb->aa_index = aa_index;
165 static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
167 unsigned long section_nr;
168 struct memory_block *mem_block;
170 section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
172 mem_block = find_memory_block(section_nr);
176 static int get_lmb_range(u32 drc_index, int n_lmbs,
177 struct drmem_lmb **start_lmb,
178 struct drmem_lmb **end_lmb)
180 struct drmem_lmb *lmb, *start, *end;
181 struct drmem_lmb *limit;
184 for_each_drmem_lmb(lmb) {
185 if (lmb->drc_index == drc_index) {
194 end = &start[n_lmbs];
196 limit = &drmem_info->lmbs[drmem_info->n_lmbs];
205 static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
207 struct memory_block *mem_block;
210 mem_block = lmb_to_memblock(lmb);
212 pr_err("Failed memory block lookup for LMB 0x%x\n", lmb->drc_index);
216 if (online && mem_block->dev.offline)
217 rc = device_online(&mem_block->dev);
218 else if (!online && !mem_block->dev.offline)
219 rc = device_offline(&mem_block->dev);
223 put_device(&mem_block->dev);
/* Online the memory block backing @lmb. */
static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}
233 #ifdef CONFIG_MEMORY_HOTREMOVE
/* Offline the memory block backing @lmb. */
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}
239 static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size)
241 unsigned long start_pfn;
242 int sections_per_block;
245 start_pfn = base >> PAGE_SHIFT;
247 lock_device_hotplug();
249 if (!pfn_valid(start_pfn))
252 sections_per_block = memory_block_size / MIN_MEMORY_BLOCK_SIZE;
254 for (i = 0; i < sections_per_block; i++) {
255 __remove_memory(base, MIN_MEMORY_BLOCK_SIZE);
256 base += MIN_MEMORY_BLOCK_SIZE;
260 /* Update memory regions for memory remove */
261 memblock_remove(base, memblock_size);
262 unlock_device_hotplug();
266 static int pseries_remove_mem_node(struct device_node *np)
272 * Check to see if we are actually removing memory
274 if (!of_node_is_type(np, "memory"))
278 * Find the base address and size of the memblock
280 ret = of_address_to_resource(np, 0, &res);
284 pseries_remove_memblock(res.start, resource_size(&res));
288 static bool lmb_is_removable(struct drmem_lmb *lmb)
290 if ((lmb->flags & DRCONF_MEM_RESERVED) ||
291 !(lmb->flags & DRCONF_MEM_ASSIGNED))
294 #ifdef CONFIG_FA_DUMP
296 * Don't hot-remove memory that falls in fadump boot memory area
297 * and memory that is reserved for capturing old kernel memory.
299 if (is_fadump_memory_area(lmb->base_addr, memory_block_size_bytes()))
302 /* device_offline() will determine if we can actually remove this lmb */
306 static int dlpar_add_lmb(struct drmem_lmb *);
308 static int dlpar_remove_lmb(struct drmem_lmb *lmb)
310 struct memory_block *mem_block;
313 if (!lmb_is_removable(lmb))
316 mem_block = lmb_to_memblock(lmb);
317 if (mem_block == NULL)
320 rc = dlpar_offline_lmb(lmb);
322 put_device(&mem_block->dev);
326 __remove_memory(lmb->base_addr, memory_block_size);
327 put_device(&mem_block->dev);
329 /* Update memory regions for memory remove */
330 memblock_remove(lmb->base_addr, memory_block_size);
332 invalidate_lmb_associativity_index(lmb);
333 lmb->flags &= ~DRCONF_MEM_ASSIGNED;
338 static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
340 struct drmem_lmb *lmb;
341 int lmbs_reserved = 0;
342 int lmbs_available = 0;
345 pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);
347 if (lmbs_to_remove == 0)
350 /* Validate that there are enough LMBs to satisfy the request */
351 for_each_drmem_lmb(lmb) {
352 if (lmb_is_removable(lmb))
355 if (lmbs_available == lmbs_to_remove)
359 if (lmbs_available < lmbs_to_remove) {
360 pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
361 lmbs_available, lmbs_to_remove);
365 for_each_drmem_lmb(lmb) {
366 rc = dlpar_remove_lmb(lmb);
370 /* Mark this lmb so we can add it later if all of the
371 * requested LMBs cannot be removed.
373 drmem_mark_lmb_reserved(lmb);
376 if (lmbs_reserved == lmbs_to_remove)
380 if (lmbs_reserved != lmbs_to_remove) {
381 pr_err("Memory hot-remove failed, adding LMB's back\n");
383 for_each_drmem_lmb(lmb) {
384 if (!drmem_lmb_reserved(lmb))
387 rc = dlpar_add_lmb(lmb);
389 pr_err("Failed to add LMB back, drc index %x\n",
392 drmem_remove_lmb_reservation(lmb);
395 if (lmbs_reserved == 0)
401 for_each_drmem_lmb(lmb) {
402 if (!drmem_lmb_reserved(lmb))
405 dlpar_release_drc(lmb->drc_index);
406 pr_info("Memory at %llx was hot-removed\n",
409 drmem_remove_lmb_reservation(lmb);
412 if (lmbs_reserved == 0)
421 static int dlpar_memory_remove_by_index(u32 drc_index)
423 struct drmem_lmb *lmb;
427 pr_debug("Attempting to hot-remove LMB, drc index %x\n", drc_index);
430 for_each_drmem_lmb(lmb) {
431 if (lmb->drc_index == drc_index) {
433 rc = dlpar_remove_lmb(lmb);
435 dlpar_release_drc(lmb->drc_index);
442 pr_debug("Failed to look up LMB for drc index %x\n", drc_index);
445 pr_debug("Failed to hot-remove memory at %llx\n",
448 pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
454 static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
456 struct drmem_lmb *lmb, *start_lmb, *end_lmb;
459 pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
460 lmbs_to_remove, drc_index);
462 if (lmbs_to_remove == 0)
465 rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
470 * Validate that all LMBs in range are not reserved. Note that it
471 * is ok if they are !ASSIGNED since our goal here is to remove the
472 * LMB range, regardless of whether some LMBs were already removed
473 * by any other reason.
475 * This is a contrast to what is done in remove_by_count() where we
476 * check for both RESERVED and !ASSIGNED (via lmb_is_removable()),
477 * because we want to remove a fixed amount of LMBs in that function.
479 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
480 if (lmb->flags & DRCONF_MEM_RESERVED) {
481 pr_err("Memory at %llx (drc index %x) is reserved\n",
482 lmb->base_addr, lmb->drc_index);
487 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
489 * dlpar_remove_lmb() will error out if the LMB is already
490 * !ASSIGNED, but this case is a no-op for us.
492 if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
495 rc = dlpar_remove_lmb(lmb);
499 drmem_mark_lmb_reserved(lmb);
503 pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");
506 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
507 if (!drmem_lmb_reserved(lmb))
511 * Setting the isolation state of an UNISOLATED/CONFIGURED
512 * device to UNISOLATE is a no-op, but the hypervisor can
513 * use it as a hint that the LMB removal failed.
515 dlpar_unisolate_drc(lmb->drc_index);
517 rc = dlpar_add_lmb(lmb);
519 pr_err("Failed to add LMB, drc index %x\n",
522 drmem_remove_lmb_reservation(lmb);
526 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
527 if (!drmem_lmb_reserved(lmb))
530 dlpar_release_drc(lmb->drc_index);
531 pr_info("Memory at %llx (drc index %x) was hot-removed\n",
532 lmb->base_addr, lmb->drc_index);
534 drmem_remove_lmb_reservation(lmb);
542 static inline int pseries_remove_memblock(unsigned long base,
543 unsigned long memblock_size)
547 static inline int pseries_remove_mem_node(struct device_node *np)
551 static int dlpar_remove_lmb(struct drmem_lmb *lmb)
555 static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
559 static int dlpar_memory_remove_by_index(u32 drc_index)
564 static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
568 #endif /* CONFIG_MEMORY_HOTREMOVE */
570 static int dlpar_add_lmb(struct drmem_lmb *lmb)
572 unsigned long block_sz;
575 if (lmb->flags & DRCONF_MEM_ASSIGNED)
578 rc = update_lmb_associativity_index(lmb);
580 dlpar_release_drc(lmb->drc_index);
581 pr_err("Failed to configure LMB 0x%x\n", lmb->drc_index);
585 block_sz = memory_block_size_bytes();
587 /* Find the node id for this LMB. Fake one if necessary. */
588 nid = of_drconf_to_nid_single(lmb);
589 if (nid < 0 || !node_possible(nid))
590 nid = first_online_node;
593 rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_MEMMAP_ON_MEMORY);
595 pr_err("Failed to add LMB 0x%x to node %u", lmb->drc_index, nid);
596 invalidate_lmb_associativity_index(lmb);
600 rc = dlpar_online_lmb(lmb);
602 pr_err("Failed to online LMB 0x%x on node %u\n", lmb->drc_index, nid);
603 __remove_memory(lmb->base_addr, block_sz);
604 invalidate_lmb_associativity_index(lmb);
606 lmb->flags |= DRCONF_MEM_ASSIGNED;
612 static int dlpar_memory_add_by_count(u32 lmbs_to_add)
614 struct drmem_lmb *lmb;
615 int lmbs_available = 0;
616 int lmbs_reserved = 0;
619 pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);
621 if (lmbs_to_add == 0)
624 /* Validate that there are enough LMBs to satisfy the request */
625 for_each_drmem_lmb(lmb) {
626 if (lmb->flags & DRCONF_MEM_RESERVED)
629 if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
632 if (lmbs_available == lmbs_to_add)
636 if (lmbs_available < lmbs_to_add)
639 for_each_drmem_lmb(lmb) {
640 if (lmb->flags & DRCONF_MEM_ASSIGNED)
643 rc = dlpar_acquire_drc(lmb->drc_index);
647 rc = dlpar_add_lmb(lmb);
649 dlpar_release_drc(lmb->drc_index);
653 /* Mark this lmb so we can remove it later if all of the
654 * requested LMBs cannot be added.
656 drmem_mark_lmb_reserved(lmb);
658 if (lmbs_reserved == lmbs_to_add)
662 if (lmbs_reserved != lmbs_to_add) {
663 pr_err("Memory hot-add failed, removing any added LMBs\n");
665 for_each_drmem_lmb(lmb) {
666 if (!drmem_lmb_reserved(lmb))
669 rc = dlpar_remove_lmb(lmb);
671 pr_err("Failed to remove LMB, drc index %x\n",
674 dlpar_release_drc(lmb->drc_index);
676 drmem_remove_lmb_reservation(lmb);
679 if (lmbs_reserved == 0)
684 for_each_drmem_lmb(lmb) {
685 if (!drmem_lmb_reserved(lmb))
688 pr_debug("Memory at %llx (drc index %x) was hot-added\n",
689 lmb->base_addr, lmb->drc_index);
690 drmem_remove_lmb_reservation(lmb);
693 if (lmbs_reserved == 0)
702 static int dlpar_memory_add_by_index(u32 drc_index)
704 struct drmem_lmb *lmb;
707 pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);
710 for_each_drmem_lmb(lmb) {
711 if (lmb->drc_index == drc_index) {
713 rc = dlpar_acquire_drc(lmb->drc_index);
715 rc = dlpar_add_lmb(lmb);
717 dlpar_release_drc(lmb->drc_index);
728 pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
730 pr_info("Memory at %llx (drc index %x) was hot-added\n",
731 lmb->base_addr, drc_index);
736 static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
738 struct drmem_lmb *lmb, *start_lmb, *end_lmb;
741 pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
742 lmbs_to_add, drc_index);
744 if (lmbs_to_add == 0)
747 rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
751 /* Validate that the LMBs in this range are not reserved */
752 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
753 /* Fail immediately if the whole range can't be hot-added */
754 if (lmb->flags & DRCONF_MEM_RESERVED) {
755 pr_err("Memory at %llx (drc index %x) is reserved\n",
756 lmb->base_addr, lmb->drc_index);
761 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
762 if (lmb->flags & DRCONF_MEM_ASSIGNED)
765 rc = dlpar_acquire_drc(lmb->drc_index);
769 rc = dlpar_add_lmb(lmb);
771 dlpar_release_drc(lmb->drc_index);
775 drmem_mark_lmb_reserved(lmb);
779 pr_err("Memory indexed-count-add failed, removing any added LMBs\n");
781 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
782 if (!drmem_lmb_reserved(lmb))
785 rc = dlpar_remove_lmb(lmb);
787 pr_err("Failed to remove LMB, drc index %x\n",
790 dlpar_release_drc(lmb->drc_index);
792 drmem_remove_lmb_reservation(lmb);
796 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
797 if (!drmem_lmb_reserved(lmb))
800 pr_info("Memory at %llx (drc index %x) was hot-added\n",
801 lmb->base_addr, lmb->drc_index);
802 drmem_remove_lmb_reservation(lmb);
809 int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
811 u32 count, drc_index;
814 lock_device_hotplug();
816 switch (hp_elog->action) {
817 case PSERIES_HP_ELOG_ACTION_ADD:
818 switch (hp_elog->id_type) {
819 case PSERIES_HP_ELOG_ID_DRC_COUNT:
820 count = hp_elog->_drc_u.drc_count;
821 rc = dlpar_memory_add_by_count(count);
823 case PSERIES_HP_ELOG_ID_DRC_INDEX:
824 drc_index = hp_elog->_drc_u.drc_index;
825 rc = dlpar_memory_add_by_index(drc_index);
827 case PSERIES_HP_ELOG_ID_DRC_IC:
828 count = hp_elog->_drc_u.ic.count;
829 drc_index = hp_elog->_drc_u.ic.index;
830 rc = dlpar_memory_add_by_ic(count, drc_index);
838 case PSERIES_HP_ELOG_ACTION_REMOVE:
839 switch (hp_elog->id_type) {
840 case PSERIES_HP_ELOG_ID_DRC_COUNT:
841 count = hp_elog->_drc_u.drc_count;
842 rc = dlpar_memory_remove_by_count(count);
844 case PSERIES_HP_ELOG_ID_DRC_INDEX:
845 drc_index = hp_elog->_drc_u.drc_index;
846 rc = dlpar_memory_remove_by_index(drc_index);
848 case PSERIES_HP_ELOG_ID_DRC_IC:
849 count = hp_elog->_drc_u.ic.count;
850 drc_index = hp_elog->_drc_u.ic.index;
851 rc = dlpar_memory_remove_by_ic(count, drc_index);
860 pr_err("Invalid action (%d) specified\n", hp_elog->action);
866 rc = drmem_update_dt();
868 unlock_device_hotplug();
872 static int pseries_add_mem_node(struct device_node *np)
878 * Check to see if we are actually adding memory
880 if (!of_node_is_type(np, "memory"))
884 * Find the base and size of the memblock
886 ret = of_address_to_resource(np, 0, &res);
891 * Update memory region to represent the memory add
893 ret = memblock_add(res.start, resource_size(&res));
894 return (ret < 0) ? -EINVAL : 0;
897 static int pseries_memory_notifier(struct notifier_block *nb,
898 unsigned long action, void *data)
900 struct of_reconfig_data *rd = data;
904 case OF_RECONFIG_ATTACH_NODE:
905 err = pseries_add_mem_node(rd->dn);
907 case OF_RECONFIG_DETACH_NODE:
908 err = pseries_remove_mem_node(rd->dn);
910 case OF_RECONFIG_UPDATE_PROPERTY:
911 if (!strcmp(rd->dn->name,
912 "ibm,dynamic-reconfiguration-memory"))
913 drmem_update_lmbs(rd->prop);
915 return notifier_from_errno(err);
918 static struct notifier_block pseries_mem_nb = {
919 .notifier_call = pseries_memory_notifier,
922 static int __init pseries_memory_hotplug_init(void)
924 if (firmware_has_feature(FW_FEATURE_LPAR))
925 of_reconfig_notifier_register(&pseries_mem_nb);
929 machine_device_initcall(pseries, pseries_memory_hotplug_init);