GNU Linux-libre 4.19.268-gnu1
kernel/livepatch/core.c
1 /*
2  * core.c - Kernel Live Patching Core
3  *
4  * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
5  * Copyright (C) 2014 SUSE
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License
9  * as published by the Free Software Foundation; either version 2
10  * of the License, or (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, see <http://www.gnu.org/licenses/>.
19  */
20
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/mutex.h>
26 #include <linux/slab.h>
27 #include <linux/list.h>
28 #include <linux/kallsyms.h>
29 #include <linux/livepatch.h>
30 #include <linux/elf.h>
31 #include <linux/moduleloader.h>
32 #include <linux/completion.h>
33 #include <linux/memory.h>
34 #include <asm/cacheflush.h>
35 #include "core.h"
36 #include "patch.h"
37 #include "transition.h"
38
39 /*
40  * klp_mutex is a coarse lock which serializes access to klp data.  All
41  * accesses to klp-related variables and structures must have mutex protection,
42  * except within the following functions which carefully avoid the need for it:
43  *
44  * - klp_ftrace_handler()
45  * - klp_update_patch_state()
46  */
47 DEFINE_MUTEX(klp_mutex);
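/*
 * For example (an illustrative sketch of the locking convention, not code
 * taken from this file), a helper that inspects or modifies klp state does so
 * only under the lock:
 *
 *	mutex_lock(&klp_mutex);
 *	(walk klp_patches, test patch->enabled, start a transition, ...)
 *	mutex_unlock(&klp_mutex);
 */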
48
49 static LIST_HEAD(klp_patches);
50
51 static struct kobject *klp_root_kobj;
52
53 static bool klp_is_module(struct klp_object *obj)
54 {
55         return obj->name;
56 }
57
58 /* sets obj->mod if object is not vmlinux and module is found */
59 static void klp_find_object_module(struct klp_object *obj)
60 {
61         struct module *mod;
62
63         if (!klp_is_module(obj))
64                 return;
65
66         mutex_lock(&module_mutex);
67         /*
68          * We do not want to block removal of patched modules and therefore
69          * we do not take a reference here. The patches are removed by
70          * klp_module_going() instead.
71          */
72         mod = find_module(obj->name);
73         /*
74          * Do not interfere with the work of klp_module_coming() and
75          * klp_module_going(). Note that the patch might still be needed before
76          * klp_module_going() is called: module functions can be called even in
77          * the GOING state until mod->exit() finishes. This is especially
78          * important for patches that modify the semantics of the functions.
79          */
80         if (mod && mod->klp_alive)
81                 obj->mod = mod;
82
83         mutex_unlock(&module_mutex);
84 }
85
86 static bool klp_is_patch_registered(struct klp_patch *patch)
87 {
88         struct klp_patch *mypatch;
89
90         list_for_each_entry(mypatch, &klp_patches, list)
91                 if (mypatch == patch)
92                         return true;
93
94         return false;
95 }
96
97 static bool klp_initialized(void)
98 {
99         return !!klp_root_kobj;
100 }
101
102 struct klp_find_arg {
103         const char *objname;
104         const char *name;
105         unsigned long addr;
106         unsigned long count;
107         unsigned long pos;
108 };
109
110 static int klp_find_callback(void *data, const char *name,
111                              struct module *mod, unsigned long addr)
112 {
113         struct klp_find_arg *args = data;
114
115         if ((mod && !args->objname) || (!mod && args->objname))
116                 return 0;
117
118         if (strcmp(args->name, name))
119                 return 0;
120
121         if (args->objname && strcmp(args->objname, mod->name))
122                 return 0;
123
124         args->addr = addr;
125         args->count++;
126
127         /*
128          * Finish the search when the symbol is found for the desired position
129          * or the position is not defined for a non-unique symbol.
130          */
131         if ((args->pos && (args->count == args->pos)) ||
132             (!args->pos && (args->count > 1)))
133                 return 1;
134
135         return 0;
136 }
137
138 static int klp_find_object_symbol(const char *objname, const char *name,
139                                   unsigned long sympos, unsigned long *addr)
140 {
141         struct klp_find_arg args = {
142                 .objname = objname,
143                 .name = name,
144                 .addr = 0,
145                 .count = 0,
146                 .pos = sympos,
147         };
148
149         mutex_lock(&module_mutex);
150         if (objname)
151                 module_kallsyms_on_each_symbol(klp_find_callback, &args);
152         else
153                 kallsyms_on_each_symbol(klp_find_callback, &args);
154         mutex_unlock(&module_mutex);
155
156         /*
157          * Ensure an address was found. If sympos is 0, ensure symbol is unique;
158          * otherwise ensure the symbol position count matches sympos.
159          */
160         if (args.addr == 0)
161                 pr_err("symbol '%s' not found in symbol table\n", name);
162         else if (args.count > 1 && sympos == 0) {
163                 pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
164                        name, objname);
165         } else if (sympos != args.count && sympos > 0) {
166                 pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
167                        sympos, name, objname ? objname : "vmlinux");
168         } else {
169                 *addr = args.addr;
170                 return 0;
171         }
172
173         *addr = 0;
174         return -EINVAL;
175 }
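/*
 * Illustrative sketch of the lookup above (the object and symbol names are
 * made up): resolve the second occurrence of a non-unique symbol in a module.
 *
 *	unsigned long addr;
 *	int ret = klp_find_object_symbol("ext4", "ext4_attr_show", 2, &addr);
 *
 * On success ret is 0 and addr holds the address of the second
 * "ext4_attr_show" reported by kallsyms for ext4. With sympos == 0 the lookup
 * only succeeds if the symbol is unique, and a NULL objname means vmlinux.
 */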
176
177 static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
178 {
179         int i, cnt, vmlinux, ret;
180         char objname[MODULE_NAME_LEN];
181         char symname[KSYM_NAME_LEN];
182         char *strtab = pmod->core_kallsyms.strtab;
183         Elf_Rela *relas;
184         Elf_Sym *sym;
185         unsigned long sympos, addr;
186
187         /*
188          * Since the field widths for objname and symname in the sscanf()
189          * call are hard-coded and correspond to MODULE_NAME_LEN and
190          * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
191          * and KSYM_NAME_LEN have the values we expect them to have.
192          *
193          * Because the value of MODULE_NAME_LEN can differ among architectures,
194          * we use the smallest/strictest upper bound possible (56, based on
195          * the current definition of MODULE_NAME_LEN) to prevent overflows.
196          */
197         BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
198
199         relas = (Elf_Rela *) relasec->sh_addr;
200         /* For each rela in this klp relocation section */
201         for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
202                 sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
203                 if (sym->st_shndx != SHN_LIVEPATCH) {
204                         pr_err("symbol %s is not marked as a livepatch symbol\n",
205                                strtab + sym->st_name);
206                         return -EINVAL;
207                 }
208
209                 /* Format: .klp.sym.objname.symname,sympos */
210                 cnt = sscanf(strtab + sym->st_name,
211                              ".klp.sym.%55[^.].%127[^,],%lu",
212                              objname, symname, &sympos);
213                 if (cnt != 3) {
214                         pr_err("symbol %s has an incorrectly formatted name\n",
215                                strtab + sym->st_name);
216                         return -EINVAL;
217                 }
218
219                 /* klp_find_object_symbol() treats a NULL objname as vmlinux */
220                 vmlinux = !strcmp(objname, "vmlinux");
221                 ret = klp_find_object_symbol(vmlinux ? NULL : objname,
222                                              symname, sympos, &addr);
223                 if (ret)
224                         return ret;
225
226                 sym->st_value = addr;
227         }
228
229         return 0;
230 }
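/*
 * Examples of the .klp.sym name format parsed above (the symbol names are
 * illustrative, not taken from a real patch):
 *
 *	.klp.sym.vmlinux.printk,0	unique symbol "printk" in vmlinux
 *	.klp.sym.ext4.ext4_attr_show,2	2nd occurrence of "ext4_attr_show"
 *					in the ext4 module
 *
 * The objname, symname and sympos fields map one-to-one onto the sscanf()
 * conversion ".klp.sym.%55[^.].%127[^,],%lu" used above.
 */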
231
232 static int klp_write_object_relocations(struct module *pmod,
233                                         struct klp_object *obj)
234 {
235         int i, cnt, ret = 0;
236         const char *objname, *secname;
237         char sec_objname[MODULE_NAME_LEN];
238         Elf_Shdr *sec;
239
240         if (WARN_ON(!klp_is_object_loaded(obj)))
241                 return -EINVAL;
242
243         objname = klp_is_module(obj) ? obj->name : "vmlinux";
244
245         /* For each klp relocation section */
246         for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
247                 sec = pmod->klp_info->sechdrs + i;
248                 secname = pmod->klp_info->secstrings + sec->sh_name;
249                 if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
250                         continue;
251
252                 /*
253                  * Format: .klp.rela.sec_objname.section_name
254                  * See comment in klp_resolve_symbols() for an explanation
255                  * of the selected field width value.
256                  */
257                 cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
258                 if (cnt != 1) {
259                         pr_err("section %s has an incorrectly formatted name\n",
260                                secname);
261                         ret = -EINVAL;
262                         break;
263                 }
264
265                 if (strcmp(objname, sec_objname))
266                         continue;
267
268                 ret = klp_resolve_symbols(sec, pmod);
269                 if (ret)
270                         break;
271
272                 ret = apply_relocate_add(pmod->klp_info->sechdrs,
273                                          pmod->core_kallsyms.strtab,
274                                          pmod->klp_info->symndx, i, pmod);
275                 if (ret)
276                         break;
277         }
278
279         return ret;
280 }
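/*
 * Examples of the .klp.rela section naming handled above (the section names
 * are illustrative): a patch module carrying relocations for vmlinux and for
 * the ext4 module might contain
 *
 *	.klp.rela.vmlinux.text.cmdline_proc_show
 *	.klp.rela.ext4.text.ext4_attr_store
 *
 * Such sections are flagged SHF_RELA_LIVEPATCH and their symbols use
 * SHN_LIVEPATCH, which is what the checks above and in klp_resolve_symbols()
 * rely on. Sections whose sec_objname does not match the object being
 * initialized are skipped until the corresponding module loads.
 */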
281
282 static int __klp_disable_patch(struct klp_patch *patch)
283 {
284         struct klp_object *obj;
285
286         if (WARN_ON(!patch->enabled))
287                 return -EINVAL;
288
289         if (klp_transition_patch)
290                 return -EBUSY;
291
292         /* enforce stacking: only the last enabled patch can be disabled */
293         if (!list_is_last(&patch->list, &klp_patches) &&
294             list_next_entry(patch, list)->enabled)
295                 return -EBUSY;
296
297         klp_init_transition(patch, KLP_UNPATCHED);
298
299         klp_for_each_object(patch, obj)
300                 if (obj->patched)
301                         klp_pre_unpatch_callback(obj);
302
303         /*
304          * Enforce the order of the func->transition writes in
305          * klp_init_transition() and the TIF_PATCH_PENDING writes in
306          * klp_start_transition().  In the rare case where klp_ftrace_handler()
307          * is called shortly after klp_update_patch_state() switches the task,
308          * this ensures the handler sees that func->transition is set.
309          */
310         smp_wmb();
311
312         klp_start_transition();
313         klp_try_complete_transition();
314         patch->enabled = false;
315
316         return 0;
317 }
318
319 /**
320  * klp_disable_patch() - disables a registered patch
321  * @patch:      The registered, enabled patch to be disabled
322  *
323  * Unregisters the patched functions from ftrace.
324  *
325  * Return: 0 on success, otherwise error
326  */
327 int klp_disable_patch(struct klp_patch *patch)
328 {
329         int ret;
330
331         mutex_lock(&klp_mutex);
332
333         if (!klp_is_patch_registered(patch)) {
334                 ret = -EINVAL;
335                 goto err;
336         }
337
338         if (!patch->enabled) {
339                 ret = -EINVAL;
340                 goto err;
341         }
342
343         ret = __klp_disable_patch(patch);
344
345 err:
346         mutex_unlock(&klp_mutex);
347         return ret;
348 }
349 EXPORT_SYMBOL_GPL(klp_disable_patch);
350
351 static int __klp_enable_patch(struct klp_patch *patch)
352 {
353         struct klp_object *obj;
354         int ret;
355
356         if (klp_transition_patch)
357                 return -EBUSY;
358
359         if (WARN_ON(patch->enabled))
360                 return -EINVAL;
361
362         /* enforce stacking: only the first disabled patch can be enabled */
363         if (patch->list.prev != &klp_patches &&
364             !list_prev_entry(patch, list)->enabled)
365                 return -EBUSY;
366
367         /*
368          * A reference is taken on the patch module to prevent it from being
369          * unloaded.
370          */
371         if (!try_module_get(patch->mod))
372                 return -ENODEV;
373
374         pr_notice("enabling patch '%s'\n", patch->mod->name);
375
376         klp_init_transition(patch, KLP_PATCHED);
377
378         /*
379          * Enforce the order of the func->transition writes in
380          * klp_init_transition() and the ops->func_stack writes in
381          * klp_patch_object(), so that klp_ftrace_handler() will see the
382          * func->transition updates before the handler is registered and the
383          * new funcs become visible to the handler.
384          */
385         smp_wmb();
386
387         klp_for_each_object(patch, obj) {
388                 if (!klp_is_object_loaded(obj))
389                         continue;
390
391                 ret = klp_pre_patch_callback(obj);
392                 if (ret) {
393                         pr_warn("pre-patch callback failed for object '%s'\n",
394                                 klp_is_module(obj) ? obj->name : "vmlinux");
395                         goto err;
396                 }
397
398                 ret = klp_patch_object(obj);
399                 if (ret) {
400                         pr_warn("failed to patch object '%s'\n",
401                                 klp_is_module(obj) ? obj->name : "vmlinux");
402                         goto err;
403                 }
404         }
405
406         klp_start_transition();
407         klp_try_complete_transition();
408         patch->enabled = true;
409
410         return 0;
411 err:
412         pr_warn("failed to enable patch '%s'\n", patch->mod->name);
413
414         klp_cancel_transition();
415         return ret;
416 }
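/*
 * Sketch of the stacking rules enforced by __klp_enable_patch() and
 * __klp_disable_patch() (the patch names are hypothetical). With patches P1,
 * P2 and P3 registered in that order, P1 and P2 enabled and P3 disabled:
 *
 *	- disabling P1 fails with -EBUSY: P2, stacked on top of it, is enabled;
 *	- disabling P2 succeeds: it is the topmost enabled patch;
 *	- enabling P3 succeeds: the patch below it (P2) is enabled;
 *	- if P2 were also disabled, enabling P3 would fail with -EBUSY.
 */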
417
418 /**
419  * klp_enable_patch() - enables a registered patch
420  * @patch:      The registered, disabled patch to be enabled
421  *
422  * Performs the needed symbol lookups and code relocations,
423  * then registers the patched functions with ftrace.
424  *
425  * Return: 0 on success, otherwise error
426  */
427 int klp_enable_patch(struct klp_patch *patch)
428 {
429         int ret;
430
431         mutex_lock(&klp_mutex);
432
433         if (!klp_is_patch_registered(patch)) {
434                 ret = -EINVAL;
435                 goto err;
436         }
437
438         ret = __klp_enable_patch(patch);
439
440 err:
441         mutex_unlock(&klp_mutex);
442         return ret;
443 }
444 EXPORT_SYMBOL_GPL(klp_enable_patch);
445
446 /*
447  * Sysfs Interface
448  *
449  * /sys/kernel/livepatch
450  * /sys/kernel/livepatch/<patch>
451  * /sys/kernel/livepatch/<patch>/enabled
452  * /sys/kernel/livepatch/<patch>/transition
453  * /sys/kernel/livepatch/<patch>/signal
454  * /sys/kernel/livepatch/<patch>/force
455  * /sys/kernel/livepatch/<patch>/<object>
456  * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
457  */
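/*
 * Example interaction from user space (a sketch; "livepatch_sample" stands in
 * for whatever the patch module is actually called):
 *
 *	cat /sys/kernel/livepatch/livepatch_sample/enabled        reads 1 or 0
 *	echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled   disable patch
 *	cat /sys/kernel/livepatch/livepatch_sample/transition     1 while a
 *								  transition runs
 *	echo 1 > /sys/kernel/livepatch/livepatch_sample/signal    signal tasks
 *								  blocking it
 *	echo 1 > /sys/kernel/livepatch/livepatch_sample/force     force the
 *								  transition (risky)
 */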
458
459 static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
460                              const char *buf, size_t count)
461 {
462         struct klp_patch *patch;
463         int ret;
464         bool enabled;
465
466         ret = kstrtobool(buf, &enabled);
467         if (ret)
468                 return ret;
469
470         patch = container_of(kobj, struct klp_patch, kobj);
471
472         mutex_lock(&klp_mutex);
473
474         if (!klp_is_patch_registered(patch)) {
475                 /*
476                  * The patch module could have disappeared in the meantime, or
477                  * it might not be properly initialized yet.
478                  */
479                 ret = -EINVAL;
480                 goto err;
481         }
482
483         if (patch->enabled == enabled) {
484                 /* already in requested state */
485                 ret = -EINVAL;
486                 goto err;
487         }
488
489         if (patch == klp_transition_patch) {
490                 klp_reverse_transition();
491         } else if (enabled) {
492                 ret = __klp_enable_patch(patch);
493                 if (ret)
494                         goto err;
495         } else {
496                 ret = __klp_disable_patch(patch);
497                 if (ret)
498                         goto err;
499         }
500
501         mutex_unlock(&klp_mutex);
502
503         return count;
504
505 err:
506         mutex_unlock(&klp_mutex);
507         return ret;
508 }
509
510 static ssize_t enabled_show(struct kobject *kobj,
511                             struct kobj_attribute *attr, char *buf)
512 {
513         struct klp_patch *patch;
514
515         patch = container_of(kobj, struct klp_patch, kobj);
516         return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
517 }
518
519 static ssize_t transition_show(struct kobject *kobj,
520                                struct kobj_attribute *attr, char *buf)
521 {
522         struct klp_patch *patch;
523
524         patch = container_of(kobj, struct klp_patch, kobj);
525         return snprintf(buf, PAGE_SIZE-1, "%d\n",
526                         patch == klp_transition_patch);
527 }
528
529 static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr,
530                             const char *buf, size_t count)
531 {
532         struct klp_patch *patch;
533         int ret;
534         bool val;
535
536         ret = kstrtobool(buf, &val);
537         if (ret)
538                 return ret;
539
540         if (!val)
541                 return count;
542
543         mutex_lock(&klp_mutex);
544
545         patch = container_of(kobj, struct klp_patch, kobj);
546         if (patch != klp_transition_patch) {
547                 mutex_unlock(&klp_mutex);
548                 return -EINVAL;
549         }
550
551         klp_send_signals();
552
553         mutex_unlock(&klp_mutex);
554
555         return count;
556 }
557
558 static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
559                            const char *buf, size_t count)
560 {
561         struct klp_patch *patch;
562         int ret;
563         bool val;
564
565         ret = kstrtobool(buf, &val);
566         if (ret)
567                 return ret;
568
569         if (!val)
570                 return count;
571
572         mutex_lock(&klp_mutex);
573
574         patch = container_of(kobj, struct klp_patch, kobj);
575         if (patch != klp_transition_patch) {
576                 mutex_unlock(&klp_mutex);
577                 return -EINVAL;
578         }
579
580         klp_force_transition();
581
582         mutex_unlock(&klp_mutex);
583
584         return count;
585 }
586
587 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
588 static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
589 static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal);
590 static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
591 static struct attribute *klp_patch_attrs[] = {
592         &enabled_kobj_attr.attr,
593         &transition_kobj_attr.attr,
594         &signal_kobj_attr.attr,
595         &force_kobj_attr.attr,
596         NULL
597 };
598
599 static void klp_kobj_release_patch(struct kobject *kobj)
600 {
601         struct klp_patch *patch;
602
603         patch = container_of(kobj, struct klp_patch, kobj);
604         complete(&patch->finish);
605 }
606
607 static struct kobj_type klp_ktype_patch = {
608         .release = klp_kobj_release_patch,
609         .sysfs_ops = &kobj_sysfs_ops,
610         .default_attrs = klp_patch_attrs,
611 };
612
613 static void klp_kobj_release_object(struct kobject *kobj)
614 {
615 }
616
617 static struct kobj_type klp_ktype_object = {
618         .release = klp_kobj_release_object,
619         .sysfs_ops = &kobj_sysfs_ops,
620 };
621
622 static void klp_kobj_release_func(struct kobject *kobj)
623 {
624 }
625
626 static struct kobj_type klp_ktype_func = {
627         .release = klp_kobj_release_func,
628         .sysfs_ops = &kobj_sysfs_ops,
629 };
630
631 /*
632  * Free all functions' kobjects in the array up to some limit. When limit is
633  * NULL, all kobjects are freed.
634  */
635 static void klp_free_funcs_limited(struct klp_object *obj,
636                                    struct klp_func *limit)
637 {
638         struct klp_func *func;
639
640         for (func = obj->funcs; func->old_name && func != limit; func++)
641                 kobject_put(&func->kobj);
642 }
643
644 /* Clean up when a patched object is unloaded */
645 static void klp_free_object_loaded(struct klp_object *obj)
646 {
647         struct klp_func *func;
648
649         obj->mod = NULL;
650
651         klp_for_each_func(obj, func)
652                 func->old_addr = 0;
653 }
654
655 /*
656  * Free all objects' kobjects in the array up to some limit. When limit is
657  * NULL, all kobjects are freed.
658  */
659 static void klp_free_objects_limited(struct klp_patch *patch,
660                                      struct klp_object *limit)
661 {
662         struct klp_object *obj;
663
664         for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
665                 klp_free_funcs_limited(obj, NULL);
666                 kobject_put(&obj->kobj);
667         }
668 }
669
670 static void klp_free_patch(struct klp_patch *patch)
671 {
672         klp_free_objects_limited(patch, NULL);
673         if (!list_empty(&patch->list))
674                 list_del(&patch->list);
675 }
676
677 static int klp_init_func(struct klp_object *obj, struct klp_func *func)
678 {
679         if (!func->old_name || !func->new_func)
680                 return -EINVAL;
681
682         if (strlen(func->old_name) >= KSYM_NAME_LEN)
683                 return -EINVAL;
684
685         INIT_LIST_HEAD(&func->stack_node);
686         func->patched = false;
687         func->transition = false;
688
689         /* The format for the sysfs directory is <function,sympos> where sympos
690          * is the nth occurrence of this symbol in kallsyms for the patched
691          * object. If the user selects 0 for old_sympos, then 1 will be used
692          * since a unique symbol will be the first occurrence.
693          */
694         return kobject_init_and_add(&func->kobj, &klp_ktype_func,
695                                     &obj->kobj, "%s,%lu", func->old_name,
696                                     func->old_sympos ? func->old_sympos : 1);
697 }
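/*
 * For example (illustrative), patching the unique vmlinux symbol
 * "cmdline_proc_show" with old_sympos left at 0 creates the directory
 *
 *	/sys/kernel/livepatch/<patch>/vmlinux/cmdline_proc_show,1
 */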
698
699 /* Arches may override this to finish any remaining arch-specific tasks */
700 void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
701                                         struct klp_object *obj)
702 {
703 }
704
705 /* parts of the initialization that are done only when the object is loaded */
706 static int klp_init_object_loaded(struct klp_patch *patch,
707                                   struct klp_object *obj)
708 {
709         struct klp_func *func;
710         int ret;
711
712         mutex_lock(&text_mutex);
713
714         module_disable_ro(patch->mod);
715         ret = klp_write_object_relocations(patch->mod, obj);
716         if (ret) {
717                 module_enable_ro(patch->mod, true);
718                 mutex_unlock(&text_mutex);
719                 return ret;
720         }
721
722         arch_klp_init_object_loaded(patch, obj);
723         module_enable_ro(patch->mod, true);
724
725         mutex_unlock(&text_mutex);
726
727         klp_for_each_func(obj, func) {
728                 ret = klp_find_object_symbol(obj->name, func->old_name,
729                                              func->old_sympos,
730                                              &func->old_addr);
731                 if (ret)
732                         return ret;
733
734                 ret = kallsyms_lookup_size_offset(func->old_addr,
735                                                   &func->old_size, NULL);
736                 if (!ret) {
737                         pr_err("kallsyms size lookup failed for '%s'\n",
738                                func->old_name);
739                         return -ENOENT;
740                 }
741
742                 ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
743                                                   &func->new_size, NULL);
744                 if (!ret) {
745                         pr_err("kallsyms size lookup failed for '%s' replacement\n",
746                                func->old_name);
747                         return -ENOENT;
748                 }
749         }
750
751         return 0;
752 }
753
754 static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
755 {
756         struct klp_func *func;
757         int ret;
758         const char *name;
759
760         if (!obj->funcs)
761                 return -EINVAL;
762
763         if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
764                 return -EINVAL;
765
766         obj->patched = false;
767         obj->mod = NULL;
768
769         klp_find_object_module(obj);
770
771         name = klp_is_module(obj) ? obj->name : "vmlinux";
772         ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
773                                    &patch->kobj, "%s", name);
774         if (ret)
775                 return ret;
776
777         klp_for_each_func(obj, func) {
778                 ret = klp_init_func(obj, func);
779                 if (ret)
780                         goto free;
781         }
782
783         if (klp_is_object_loaded(obj)) {
784                 ret = klp_init_object_loaded(patch, obj);
785                 if (ret)
786                         goto free;
787         }
788
789         return 0;
790
791 free:
792         klp_free_funcs_limited(obj, func);
793         kobject_put(&obj->kobj);
794         return ret;
795 }
796
797 static int klp_init_patch(struct klp_patch *patch)
798 {
799         struct klp_object *obj;
800         int ret;
801
802         if (!patch->objs)
803                 return -EINVAL;
804
805         mutex_lock(&klp_mutex);
806
807         patch->enabled = false;
808         init_completion(&patch->finish);
809
810         ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
811                                    klp_root_kobj, "%s", patch->mod->name);
812         if (ret) {
813                 mutex_unlock(&klp_mutex);
814                 return ret;
815         }
816
817         klp_for_each_object(patch, obj) {
818                 ret = klp_init_object(patch, obj);
819                 if (ret)
820                         goto free;
821         }
822
823         list_add_tail(&patch->list, &klp_patches);
824
825         mutex_unlock(&klp_mutex);
826
827         return 0;
828
829 free:
830         klp_free_objects_limited(patch, obj);
831
832         mutex_unlock(&klp_mutex);
833
834         kobject_put(&patch->kobj);
835         wait_for_completion(&patch->finish);
836
837         return ret;
838 }
839
840 /**
841  * klp_unregister_patch() - unregisters a patch
842  * @patch:      Disabled patch to be unregistered
843  *
844  * Frees the data structures and removes the sysfs interface.
845  *
846  * Return: 0 on success, otherwise error
847  */
848 int klp_unregister_patch(struct klp_patch *patch)
849 {
850         int ret;
851
852         mutex_lock(&klp_mutex);
853
854         if (!klp_is_patch_registered(patch)) {
855                 ret = -EINVAL;
856                 goto err;
857         }
858
859         if (patch->enabled) {
860                 ret = -EBUSY;
861                 goto err;
862         }
863
864         klp_free_patch(patch);
865
866         mutex_unlock(&klp_mutex);
867
868         kobject_put(&patch->kobj);
869         wait_for_completion(&patch->finish);
870
871         return 0;
872 err:
873         mutex_unlock(&klp_mutex);
874         return ret;
875 }
876 EXPORT_SYMBOL_GPL(klp_unregister_patch);
877
878 /**
879  * klp_register_patch() - registers a patch
880  * @patch:      Patch to be registered
881  *
882  * Initializes the data structure associated with the patch and
883  * creates the sysfs interface.
884  *
885  * There is no need to take the reference on the patch module here. It is done
886  * later when the patch is enabled.
887  *
888  * Return: 0 on success, otherwise error
889  */
890 int klp_register_patch(struct klp_patch *patch)
891 {
892         if (!patch || !patch->mod)
893                 return -EINVAL;
894
895         if (!is_livepatch_module(patch->mod)) {
896                 pr_err("module %s is not marked as a livepatch module\n",
897                        patch->mod->name);
898                 return -EINVAL;
899         }
900
901         if (!klp_initialized())
902                 return -ENODEV;
903
904         if (!klp_have_reliable_stack()) {
905                 pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
906                 return -ENOSYS;
907         }
908
909         return klp_init_patch(patch);
910 }
911 EXPORT_SYMBOL_GPL(klp_register_patch);
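/*
 * A minimal sketch of how the API above is used by a patch module, loosely
 * following samples/livepatch/livepatch-sample.c (the function and variable
 * names here are illustrative):
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *		ret = klp_enable_patch(&patch);
 *		if (ret) {
 *			WARN_ON(klp_unregister_patch(&patch));
 *			return ret;
 *		}
 *		return 0;
 *	}
 *
 *	static void livepatch_exit(void)
 *	{
 *		WARN_ON(klp_unregister_patch(&patch));
 *	}
 *
 * Leaving .name unset in a klp_object means the object is vmlinux. The patch
 * module must also carry MODULE_INFO(livepatch, "Y") so that
 * is_livepatch_module() accepts it in klp_register_patch().
 */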
912
913 /*
914  * Remove parts of patches that touch a given kernel module. The list of
915  * patches processed might be limited. When limit is NULL, all patches
916  * will be handled.
917  */
918 static void klp_cleanup_module_patches_limited(struct module *mod,
919                                                struct klp_patch *limit)
920 {
921         struct klp_patch *patch;
922         struct klp_object *obj;
923
924         list_for_each_entry(patch, &klp_patches, list) {
925                 if (patch == limit)
926                         break;
927
928                 klp_for_each_object(patch, obj) {
929                         if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
930                                 continue;
931
932                         /*
933                          * Only unpatch the module if the patch is enabled or
934                          * is in transition.
935                          */
936                         if (patch->enabled || patch == klp_transition_patch) {
937
938                                 if (patch != klp_transition_patch)
939                                         klp_pre_unpatch_callback(obj);
940
941                                 pr_notice("reverting patch '%s' on unloading module '%s'\n",
942                                           patch->mod->name, obj->mod->name);
943                                 klp_unpatch_object(obj);
944
945                                 klp_post_unpatch_callback(obj);
946                         }
947
948                         klp_free_object_loaded(obj);
949                         break;
950                 }
951         }
952 }
953
954 int klp_module_coming(struct module *mod)
955 {
956         int ret;
957         struct klp_patch *patch;
958         struct klp_object *obj;
959
960         if (WARN_ON(mod->state != MODULE_STATE_COMING))
961                 return -EINVAL;
962
963         mutex_lock(&klp_mutex);
964         /*
965          * Each module has to know that klp_module_coming()
966          * has been called. We never know what module will
967          * get patched by a new patch.
968          */
969         mod->klp_alive = true;
970
971         list_for_each_entry(patch, &klp_patches, list) {
972                 klp_for_each_object(patch, obj) {
973                         if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
974                                 continue;
975
976                         obj->mod = mod;
977
978                         ret = klp_init_object_loaded(patch, obj);
979                         if (ret) {
980                                 pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
981                                         patch->mod->name, obj->mod->name, ret);
982                                 goto err;
983                         }
984
985                         /*
986                          * Only patch the module if the patch is enabled or is
987                          * in transition.
988                          */
989                         if (!patch->enabled && patch != klp_transition_patch)
990                                 break;
991
992                         pr_notice("applying patch '%s' to loading module '%s'\n",
993                                   patch->mod->name, obj->mod->name);
994
995                         ret = klp_pre_patch_callback(obj);
996                         if (ret) {
997                                 pr_warn("pre-patch callback failed for object '%s'\n",
998                                         obj->name);
999                                 goto err;
1000                         }
1001
1002                         ret = klp_patch_object(obj);
1003                         if (ret) {
1004                                 pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
1005                                         patch->mod->name, obj->mod->name, ret);
1006
1007                                 klp_post_unpatch_callback(obj);
1008                                 goto err;
1009                         }
1010
1011                         if (patch != klp_transition_patch)
1012                                 klp_post_patch_callback(obj);
1013
1014                         break;
1015                 }
1016         }
1017
1018         mutex_unlock(&klp_mutex);
1019
1020         return 0;
1021
1022 err:
1023         /*
1024          * If a patch is unsuccessfully applied, return
1025          * error to the module loader.
1026          */
1027         pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
1028                 patch->mod->name, obj->mod->name, obj->mod->name);
1029         mod->klp_alive = false;
1030         obj->mod = NULL;
1031         klp_cleanup_module_patches_limited(mod, patch);
1032         mutex_unlock(&klp_mutex);
1033
1034         return ret;
1035 }
1036
1037 void klp_module_going(struct module *mod)
1038 {
1039         if (WARN_ON(mod->state != MODULE_STATE_GOING &&
1040                     mod->state != MODULE_STATE_COMING))
1041                 return;
1042
1043         mutex_lock(&klp_mutex);
1044         /*
1045          * Each module has to know that klp_module_going()
1046          * has been called. We never know what module will
1047          * get patched by a new patch.
1048          */
1049         mod->klp_alive = false;
1050
1051         klp_cleanup_module_patches_limited(mod, NULL);
1052
1053         mutex_unlock(&klp_mutex);
1054 }
1055
1056 static int __init klp_init(void)
1057 {
1058         int ret;
1059
1060         ret = klp_check_compiler_support();
1061         if (ret) {
1062                 pr_info("Your compiler is too old; turning livepatching off.\n");
1063                 return -EINVAL;
1064         }
1065
1066         klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
1067         if (!klp_root_kobj)
1068                 return -ENOMEM;
1069
1070         return 0;
1071 }
1072
1073 module_init(klp_init);