/*
 * NUMA support for s390
 *
 * Implement NUMA core code.
 *
 * Copyright IBM Corp. 2015
 */
9 #define KMSG_COMPONENT "numa"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 #include <linux/kernel.h>
13 #include <linux/mmzone.h>
14 #include <linux/cpumask.h>
15 #include <linux/bootmem.h>
16 #include <linux/memblock.h>
17 #include <linux/slab.h>
18 #include <linux/node.h>
21 #include "numa_mode.h"
/* Per-node pglist_data, reached through the NODE_DATA() macro. */
pg_data_t *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);

/* For each node id, the mask of CPUs currently attached to that node. */
cpumask_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);
29 const struct numa_mode numa_mode_plain = {
33 static const struct numa_mode *mode = &numa_mode_plain;
35 int numa_pfn_to_nid(unsigned long pfn)
37 return mode->__pfn_to_nid ? mode->__pfn_to_nid(pfn) : 0;
40 void numa_update_cpu_topology(void)
42 if (mode->update_cpu_topology)
43 mode->update_cpu_topology();
46 int __node_distance(int a, int b)
48 return mode->distance ? mode->distance(a, b) : 0;
50 EXPORT_SYMBOL(__node_distance);
/* Set by the early "numa_debug" parameter (see parse_debug() below). */
int numa_debug_enabled;
55 * alloc_node_data() - Allocate node data
57 static __init pg_data_t *alloc_node_data(void)
61 res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 1);
63 panic("Could not allocate memory for node data!\n");
64 memset(res, 0, sizeof(pg_data_t));
69 * numa_setup_memory() - Assign bootmem to nodes
71 * The memory is first added to memblock without any respect to nodes.
72 * This is fixed before remaining memblock memory is handed over to the
74 * An important side effect is that large bootmem allocations might easily
75 * cross node boundaries, which can be needed for large allocations with
76 * smaller memory stripes in each node (i.e. when using NUMA emulation).
78 * Memory defines nodes:
79 * Therefore this routine also sets the nodes online with memory.
81 static void __init numa_setup_memory(void)
83 unsigned long cur_base, align, end_of_dram;
86 end_of_dram = memblock_end_of_DRAM();
87 align = mode->align ? mode->align() : ULONG_MAX;
90 * Step through all available memory and assign it to the nodes
91 * indicated by the mode implementation.
92 * All nodes which are seen here will be set online.
96 nid = numa_pfn_to_nid(PFN_DOWN(cur_base));
98 memblock_set_node(cur_base, align, &memblock.memory, nid);
100 } while (cur_base < end_of_dram);
102 /* Allocate and fill out node_data */
103 for (nid = 0; nid < MAX_NUMNODES; nid++)
104 NODE_DATA(nid) = alloc_node_data();
106 for_each_online_node(nid) {
107 unsigned long start_pfn, end_pfn;
108 unsigned long t_start, t_end;
111 start_pfn = ULONG_MAX;
113 for_each_mem_pfn_range(i, nid, &t_start, &t_end, NULL) {
114 if (t_start < start_pfn)
119 NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
120 NODE_DATA(nid)->node_id = nid;
125 * numa_setup() - Earliest initialization
127 * Assign the mode and call the mode's setup routine.
129 void __init numa_setup(void)
131 pr_info("NUMA mode: %s\n", mode->name);
140 * numa_init_early() - Initialization initcall
142 * This runs when only one CPU is online and before the first
143 * topology update is called for by the scheduler.
145 static int __init numa_init_early(void)
147 /* Attach all possible CPUs to node 0 for now. */
148 cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
151 early_initcall(numa_init_early);
154 * numa_init_late() - Initialization initcall
156 * Register NUMA nodes.
158 static int __init numa_init_late(void)
162 for_each_online_node(nid)
163 register_one_node(nid);
166 device_initcall(numa_init_late);
168 static int __init parse_debug(char *parm)
170 numa_debug_enabled = 1;
173 early_param("numa_debug", parse_debug);
175 static int __init parse_numa(char *parm)
177 if (strcmp(parm, numa_mode_plain.name) == 0)
178 mode = &numa_mode_plain;
179 #ifdef CONFIG_NUMA_EMU
180 if (strcmp(parm, numa_mode_emu.name) == 0)
181 mode = &numa_mode_emu;
185 early_param("numa", parse_numa);