// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/boot_data.h>
#include <asm/sections.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"
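
/*
 * Variables marked __bootdata live in the decompressor's .boot.data section,
 * variables marked __bootdata_preserved in .boot.preserved.data. Both are
 * copied into the matching sections of the decompressed kernel by
 * copy_bootdata() below; the "preserved" variant is additionally kept around
 * after boot, while plain .boot.data is discarded once early setup is done.
 */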
unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
unsigned long __bootdata(__amode31_base);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata(ident_map_size);
int __bootdata(is_full_image) = 1;
struct initrd_data __bootdata(initrd_data);

u64 __bootdata_preserved(stfle_fac_list[16]);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);
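
/* Print an early error message via SCLP and stop; this function never returns. */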
void error(char *x)
{
        sclp_early_printk("\n\n");
        sclp_early_printk(x);
        sclp_early_printk("\n\n -- System halted");

        disabled_wait();
}
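
/*
 * Tag the kernel with the load-program-parameter (LPP) magic value if
 * facility 40 is installed, so that CPU-measurement sampling entries
 * recorded from now on can be attributed to the kernel.
 */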
static void setup_lpp(void)
{
        S390_lowcore.current_pid = 0;
        S390_lowcore.lpp = LPP_MAGIC;
        if (test_facility(40))
                lpp(&S390_lowcore.lpp);
}

#ifdef CONFIG_KERNEL_UNCOMPRESSED
unsigned long mem_safe_offset(void)
{
        return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif
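
/*
 * Move the initrd up to safe_addr if it currently sits below it and would
 * be overwritten during decompression; return the first address past the
 * memory the initrd occupies.
 */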
static unsigned long rescue_initrd(unsigned long safe_addr)
{
        if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
                return safe_addr;
        if (!initrd_data.start || !initrd_data.size)
                return safe_addr;
        if (initrd_data.start < safe_addr) {
                memmove((void *)safe_addr, (void *)initrd_data.start, initrd_data.size);
                initrd_data.start = safe_addr;
        }
        return initrd_data.start + initrd_data.size;
}
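
/*
 * Copy the decompressor's .boot.data and .boot.preserved.data sections into
 * the corresponding sections of the decompressed kernel image. The section
 * sizes on both sides must match exactly, otherwise the boot data would be
 * silently truncated or overread.
 */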
static void copy_bootdata(void)
{
        if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
                error(".boot.data section size mismatch");
        memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
        if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
                error(".boot.preserved.data section size mismatch");
        memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}
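
/*
 * Apply the R_390_* relocations from the kernel's .rela.dyn section so that
 * the image works at its (possibly KASLR-randomized) load address. The
 * per-entry fixup is delegated to arch_kexec_do_relocs().
 */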
static void handle_relocs(unsigned long offset)
{
        Elf64_Rela *rela_start, *rela_end, *rela;
        int r_type, r_sym, rc;
        Elf64_Addr loc, val;
        Elf64_Sym *dynsym;

        rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
        rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
        dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
        for (rela = rela_start; rela < rela_end; rela++) {
                loc = rela->r_offset + offset;
                val = rela->r_addend;
                r_sym = ELF64_R_SYM(rela->r_info);
                if (r_sym) {
                        if (dynsym[r_sym].st_shndx != SHN_UNDEF)
                                val += dynsym[r_sym].st_value + offset;
                } else {
                        /*
                         * 0 == undefined symbol table index (STN_UNDEF),
                         * used for R_390_RELATIVE, only add KASLR offset
                         */
                        val += offset;
                }
                r_type = ELF64_R_TYPE(rela->r_info);
                rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
                if (rc)
                        error("Unknown relocation type");
        }
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It might not be all online memory, but also include standby (offline)
 * memory. "ident_map_size" could be lower than the actual amount of standby or
 * even online memory present, due to limiting factors. We should never go
 * above this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always <= end of the last online memory block (get_mem_detect_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    a crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
        unsigned long hsa_size;

        ident_map_size = max_physmem_end;
        if (memory_limit)
                ident_map_size = min(ident_map_size, memory_limit);
        ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
        if (oldmem_data.start) {
                kaslr_enabled = 0;
                ident_map_size = min(ident_map_size, oldmem_data.size);
        } else if (ipl_block_valid && is_ipl_block_dump()) {
                kaslr_enabled = 0;
                if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
                        ident_map_size = min(ident_map_size, hsa_size);
        }
#endif
}
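
/*
 * Carve up the virtual address space top-down: the memcpy real area and the
 * absolute lowcore mapping sit directly below vmax, followed by the module
 * area, the vmalloc area, the vmemmap array and finally the 1:1 identity
 * mapping of physical memory.
 *
 * Rough worked example, assuming the usual s390 region sizes
 * (_REGION2_SIZE = 4 TB, _REGION3_SIZE = 2 GB): with 1 TB of memory and the
 * default vmalloc size, everything fits below _REGION2_SIZE, so the 3-level
 * layout is chosen: vmax = 4 TB with 2 GB top-level region table entries.
 */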
static void setup_kernel_memory_layout(void)
{
        unsigned long vmemmap_start;
        unsigned long rte_size;
        unsigned long pages;
        unsigned long vmax;

        pages = ident_map_size / PAGE_SIZE;
        /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
        vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

        /* choose kernel address space layout: 4 or 3 levels. */
        vmemmap_start = round_up(ident_map_size, _REGION3_SIZE);
        if (IS_ENABLED(CONFIG_KASAN) ||
            vmalloc_size > _REGION2_SIZE ||
            vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
                    _REGION2_SIZE) {
                vmax = _REGION1_SIZE;
                rte_size = _REGION2_SIZE;
        } else {
                vmax = _REGION2_SIZE;
                rte_size = _REGION3_SIZE;
        }
        /*
         * Force modules and the vmalloc area under the ultravisor secure
         * storage limit, so that any vmalloc allocation we do can be used
         * to back secure guest storage.
         */
        vmax = adjust_to_uv_max(vmax);
#ifdef CONFIG_KASAN
        /* force vmalloc and modules below kasan shadow */
        vmax = min(vmax, KASAN_SHADOW_START);
#endif
        __memcpy_real_area = round_down(vmax - PAGE_SIZE, PAGE_SIZE);
        __abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
                                   sizeof(struct lowcore));
        MODULES_END = round_down(__abs_lowcore, _SEGMENT_SIZE);
        MODULES_VADDR = MODULES_END - MODULES_LEN;
        VMALLOC_END = MODULES_VADDR;

        /* allow vmalloc area to occupy up to about 1/2 of the remaining virtual space */
        vmalloc_size = min(vmalloc_size, round_down(VMALLOC_END / 2, _REGION3_SIZE));
        VMALLOC_START = VMALLOC_END - vmalloc_size;

        /* split remaining virtual space between 1:1 mapping & vmemmap array */
        pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
        pages = SECTION_ALIGN_UP(pages);
        /* keep vmemmap_start aligned to a top level region table entry */
        vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
        /* vmemmap_start is the future VMEM_MAX_PHYS, make sure it is within MAX_PHYSMEM */
        vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
        /* make sure the identity map doesn't overlay with vmemmap */
        ident_map_size = min(ident_map_size, vmemmap_start);
        vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
        /* make sure vmemmap doesn't overlay with the vmalloc area */
        VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START);
        vmemmap = (struct page *)vmemmap_start;
}

/*
 * This function clears the BSS section of the decompressed Linux kernel,
 * NOT the decompressor's.
 */
static void clear_bss_section(void)
{
        memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set the vmalloc area size to an 8th of (potential) physical memory
 * size, unless the size has been set by a kernel command line parameter.
 */
static void setup_vmalloc_size(void)
{
        unsigned long size;

        if (vmalloc_size_set)
                return;
        size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
        vmalloc_size = max(size, vmalloc_size);
}
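
/*
 * Shift every load-address-dependent field of the vmlinux descriptor by the
 * KASLR offset, so that all later steps operate on the randomized addresses.
 */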
static void offset_vmlinux_info(unsigned long offset)
{
        vmlinux.default_lma += offset;
        *(unsigned long *)(&vmlinux.entry) += offset;
        vmlinux.bootdata_off += offset;
        vmlinux.bootdata_preserved_off += offset;
        vmlinux.rela_dyn_start += offset;
        vmlinux.rela_dyn_end += offset;
        vmlinux.dynsym_start += offset;
}
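
/*
 * Reserve room right above safe_addr for the relocated .amode31 section,
 * which holds code and data that must stay 31-bit addressable.
 */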
static unsigned long reserve_amode31(unsigned long safe_addr)
{
        __amode31_base = PAGE_ALIGN(safe_addr);
        return safe_addr + vmlinux.amode31_size;
}
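
/*
 * Main C entry point of the decompressor: set up the memory layout,
 * optionally randomize the kernel's load address (KASLR), decompress and
 * relocate the kernel image, and finally jump to its entry point.
 */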
void startup_kernel(void)
{
        unsigned long max_physmem_end;
        unsigned long random_lma;
        unsigned long safe_addr;
        void *img;

        initrd_data.start = parmarea.initrd_start;
        initrd_data.size = parmarea.initrd_size;
        oldmem_data.start = parmarea.oldmem_base;
        oldmem_data.size = parmarea.oldmem_size;

        setup_lpp();
        store_ipl_parmblock();
        safe_addr = mem_safe_offset();
        safe_addr = reserve_amode31(safe_addr);
        safe_addr = read_ipl_report(safe_addr);
        uv_query_info();
        safe_addr = rescue_initrd(safe_addr);
        sclp_early_read_info();
        setup_boot_command_line();
        parse_boot_command_line();
        sanitize_prot_virt_host();
        max_physmem_end = detect_memory(&safe_addr);
        setup_ident_map_size(max_physmem_end);
        setup_vmalloc_size();
        setup_kernel_memory_layout();

        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
                random_lma = get_random_base(safe_addr);
                if (random_lma) {
                        __kaslr_offset = random_lma - vmlinux.default_lma;
                        img = (void *)vmlinux.default_lma;
                        offset_vmlinux_info(__kaslr_offset);
                }
        }

        if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
                img = decompress_kernel();
                memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
        } else if (__kaslr_offset)
                memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);

        clear_bss_section();
        copy_bootdata();
        handle_relocs(__kaslr_offset);

        if (__kaslr_offset) {
                /*
                 * Save the KASLR offset for early dumps, before vmcore_info is set.
                 * Mark it as uneven to distinguish it from a real vmcore_info pointer.
                 */
                S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
                /* Clear the non-relocated kernel copy */
                if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
                        memset(img, 0, vmlinux.image_size);
        }
        vmlinux.entry();
}