// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec_load system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "kexec_internal.h"

static int copy_user_segment_list(struct kimage *image,
				  unsigned long nr_segments,
				  struct kexec_segment __user *segments)
{
	int ret;
	size_t segment_bytes;

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	ret = copy_from_user(image->segment, segments, segment_bytes);
	if (ret)
		ret = -EFAULT;
	return ret;
}

static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
			     unsigned long nr_segments,
			     struct kexec_segment __user *segments,
			     unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_ON_CRASH;

	if (kexec_on_panic) {
		/* Verify we have a valid entry point */
		if ((entry < phys_to_boot_phys(crashk_res.start)) ||
		    (entry > phys_to_boot_phys(crashk_res.end)))
			return -EADDRNOTAVAIL;
	}

	/* Allocate and initialize a controlling structure */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;
	ret = copy_user_segment_list(image, nr_segments, segments);
	if (ret)
		goto out_free_image;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_image;

	/*
	 * Find a location for the control code buffer, and add it to the
	 * vector of segments so that its pages will also be counted as
	 * destination pages.
	 */
	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_image:
	kfree(image);
	return ret;
}

static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
		struct kexec_segment __user *segments, unsigned long flags)
{
	struct kimage **dest_image, *image;
	unsigned long i;
	int ret;

	/*
	 * Because we write directly to the reserved memory region when loading
	 * crash kernels we need serialization here to prevent multiple crash
	 * kernels from attempting to load simultaneously.
	 */
	if (!kexec_trylock())
		return -EBUSY;

	if (flags & KEXEC_ON_CRASH) {
		dest_image = &kexec_crash_image;
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	} else {
		dest_image = &kexec_image;
	}

	if (nr_segments == 0) {
		/* Uninstall image */
		kimage_free(xchg(dest_image, NULL));
		ret = 0;
		goto out_unlock;
	}
	if (flags & KEXEC_ON_CRASH) {
		/*
		 * Loading another kernel to switch to if this one
		 * crashes.  Free any current crash dump kernel before
		 * we corrupt it.
		 */
		kimage_free(xchg(&kexec_crash_image, NULL));
	}

	ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
	if (ret)
		goto out_unlock;

	if (flags & KEXEC_PRESERVE_CONTEXT)
		image->preserve_context = 1;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	/*
	 * Some architectures (like s390) may touch the crash memory before
	 * machine_kexec_prepare(); we must copy the vmcoreinfo data after it.
	 */
	ret = kimage_crash_copy_vmcoreinfo(image);
	if (ret)
		goto out;

	for (i = 0; i < nr_segments; i++) {
		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}
	kimage_terminate(image);
	ret = machine_kexec_post_load(image);
	if (ret)
		goto out;

	/* Install the new kernel and uninstall the old */
	image = xchg(dest_image, image);
out:
	if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();
	kimage_free(image);
out_unlock:
	kexec_unlock();
	return ret;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing ongoing DMAs and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination, and
 *   jumps into the image at entry.
 *
 * kexec does not sync or unmount filesystems, so if you need
 * that to happen you must do it yourself.
 */
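
/*
 * A rough sketch of how a userspace loader might invoke this interface.
 * The single segment, the 0x100000 destination address, and the buffer
 * names are purely illustrative; in practice a loader such as kexec-tools
 * builds the segment list and entry point for the target architecture:
 *
 *	struct kexec_segment seg = {
 *		.buf   = image_buf,
 *		.bufsz = image_len,
 *		.mem   = (void *)0x100000,
 *		.memsz = page_aligned_len,
 *	};
 *
 *	if (syscall(__NR_kexec_load, entry, 1, &seg, KEXEC_ARCH_DEFAULT))
 *		perror("kexec_load");
 */
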
static inline int kexec_load_check(unsigned long nr_segments,
				   unsigned long flags)
{
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Permit LSMs and IMA to fail the kexec */
	result = security_kernel_load_data(LOADING_KEXEC_IMAGE, false);
	if (result < 0)
		return result;

	/*
	 * kexec can be used to circumvent module loading restrictions, so
	 * prevent loading in that case.
	 */
	result = security_locked_down(LOCKDOWN_KEXEC);
	if (result)
		return result;

	/*
	 * Verify we have a legal set of flags.
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;
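
	/*
	 * A worked example of the check above, assuming the current uapi
	 * mask values: KEXEC_ARCH_MASK covers the high 16 bits, so for
	 * flags == KEXEC_ON_CRASH | KEXEC_ARCH_X86_64 both sides reduce to
	 * KEXEC_ON_CRASH and the load is accepted, while any undefined low
	 * bit (say 0x8) survives only on the right-hand side and the load
	 * is rejected with -EINVAL.
	 */
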
	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
		struct kexec_segment __user *, segments, unsigned long, flags)
{
	int result;

	result = kexec_load_check(nr_segments, flags);
	if (result)
		return result;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
	    ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	result = do_kexec_load(entry, nr_segments, segments, flags);
	return result;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	result = kexec_load_check(nr_segments, flags);
	if (result)
		return result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;
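
	/*
	 * Each 32-bit struct compat_kexec_segment carries the same four
	 * fields as the native struct kexec_segment, but with compat-sized
	 * pointers and sizes, roughly (see the kexec headers for the
	 * authoritative definition):
	 *
	 *	struct compat_kexec_segment {
	 *		compat_uptr_t buf;
	 *		compat_size_t bufsz;
	 *		compat_uptr_t mem;
	 *		compat_size_t memsz;
	 *	};
	 *
	 * The loop below widens each entry before handing the list on to
	 * do_kexec_load().
	 */
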
	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	result = do_kexec_load(entry, nr_segments, ksegments, flags);
	return result;
}
#endif