GNU Linux-libre 5.19-rc6-gnu
[releases.git] / tools / perf / util / bpf-loader.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * bpf-loader.c
4  *
5  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
6  * Copyright (C) 2015 Huawei Inc.
7  */
8
9 #include <linux/bpf.h>
10 #include <bpf/libbpf.h>
11 #include <bpf/bpf.h>
12 #include <linux/err.h>
13 #include <linux/kernel.h>
14 #include <linux/string.h>
15 #include <linux/zalloc.h>
16 #include <errno.h>
17 #include <stdlib.h>
18 #include "debug.h"
19 #include "evlist.h"
20 #include "bpf-loader.h"
21 #include "bpf-prologue.h"
22 #include "probe-event.h"
23 #include "probe-finder.h" // for MAX_PROBES
24 #include "parse-events.h"
25 #include "strfilter.h"
26 #include "util.h"
27 #include "llvm-utils.h"
28 #include "c++/clang-c.h"
29 #include "hashmap.h"
30 #include "asm/bug.h"
31
32 #include <internal/xyarray.h>
33
34 /* temporarily disable libbpf deprecation warnings */
35 #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
36
37 static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
38                               const char *fmt, va_list args)
39 {
40         return veprintf(1, verbose, pr_fmt(fmt), args);
41 }
42
/* Per-program private data, attached via bpf_program_hash. */
struct bpf_prog_priv {
	bool is_tp;			/* true: tracepoint; false: k/uprobe */
	char *sys_name;			/* tracepoint subsystem (is_tp only) */
	char *evt_name;			/* tracepoint event name (is_tp only) */
	struct perf_probe_event pev;	/* parsed probe definition */
	bool need_prologue;		/* needs generated argument fetcher */
	struct bpf_insn *insns_buf;	/* scratch: prologue + original insns */
	int nr_types;			/* number of distinct prologue types */
	int *type_mapping;		/* tev index -> prologue type */
};
53
/* Node tracking one opened bpf_object on the global bpf_objects_list. */
struct bpf_perf_object {
	struct list_head list;
	struct bpf_object *obj;
};
58
/* All opened BPF objects, linked through bpf_perf_object::list. */
static LIST_HEAD(bpf_objects_list);
/* bpf_program -> struct bpf_prog_priv; created lazily on first set. */
static struct hashmap *bpf_program_hash;
/* bpf_map -> struct bpf_map_priv; created lazily on first set. */
static struct hashmap *bpf_map_hash;
62
63 static struct bpf_perf_object *
64 bpf_perf_object__next(struct bpf_perf_object *prev)
65 {
66         struct bpf_perf_object *next;
67
68         if (!prev)
69                 next = list_first_entry(&bpf_objects_list,
70                                         struct bpf_perf_object,
71                                         list);
72         else
73                 next = list_next_entry(prev, list);
74
75         /* Empty list is noticed here so don't need checking on entry. */
76         if (&next->list == &bpf_objects_list)
77                 return NULL;
78
79         return next;
80 }
81
/*
 * Iterate over all tracked BPF perf objects. Keeps a one-step lookahead
 * in (tmp) so the current entry may be freed inside the loop body.
 */
#define bpf_perf_object__for_each(perf_obj, tmp)        \
	for ((perf_obj) = bpf_perf_object__next(NULL),  \
	     (tmp) = bpf_perf_object__next(perf_obj);   \
	     (perf_obj) != NULL;                        \
	     (perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp))
87
/* True once libbpf_set_print() has installed our print callback. */
static bool libbpf_initialized;
89
90 static int bpf_perf_object__add(struct bpf_object *obj)
91 {
92         struct bpf_perf_object *perf_obj = zalloc(sizeof(*perf_obj));
93
94         if (perf_obj) {
95                 INIT_LIST_HEAD(&perf_obj->list);
96                 perf_obj->obj = obj;
97                 list_add_tail(&perf_obj->list, &bpf_objects_list);
98         }
99         return perf_obj ? 0 : -ENOMEM;
100 }
101
102 static int libbpf_init(void)
103 {
104         if (libbpf_initialized)
105                 return 0;
106
107         libbpf_set_print(libbpf_perf_print);
108         libbpf_initialized = true;
109         return 0;
110 }
111
112 struct bpf_object *
113 bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
114 {
115         LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = name);
116         struct bpf_object *obj;
117         int err;
118
119         err = libbpf_init();
120         if (err)
121                 return ERR_PTR(err);
122
123         obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
124         if (IS_ERR_OR_NULL(obj)) {
125                 pr_debug("bpf: failed to load buffer\n");
126                 return ERR_PTR(-EINVAL);
127         }
128
129         if (bpf_perf_object__add(obj)) {
130                 bpf_object__close(obj);
131                 return ERR_PTR(-ENOMEM);
132         }
133
134         return obj;
135 }
136
/* Unlink @perf_obj from the global list, close its object and free it. */
static void bpf_perf_object__close(struct bpf_perf_object *perf_obj)
{
	list_del(&perf_obj->list);
	bpf_object__close(perf_obj->obj);
	free(perf_obj);
}
143
/*
 * Open a BPF object from @filename and register it on the global list.
 * If @source is true the file is C source: the builtin clang is tried
 * first, then an external LLVM compile as fallback.
 *
 * Returns the object, or an ERR_PTR()/IS_ERR_OR_NULL encoded error.
 */
struct bpf_object *bpf__prepare_load(const char *filename, bool source)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = filename);
	struct bpf_object *obj;
	int err;

	err = libbpf_init();
	if (err)
		return ERR_PTR(err);

	if (source) {
		void *obj_buf;
		size_t obj_buf_sz;

		perf_clang__init();
		err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
		perf_clang__cleanup();
		if (err) {
			pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
			err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
			if (err)
				return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
		} else
			pr_debug("bpf: successful builtin compilation\n");
		obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);

		/* Dump the compiled object only when opening succeeded. */
		if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
			llvm__dump_obj(filename, obj_buf, obj_buf_sz);

		/* NOTE(review): buffer freed right after open — presumably
		 * bpf_object__open_mem() keeps its own copy; confirm against
		 * the libbpf version in use.
		 */
		free(obj_buf);
	} else {
		obj = bpf_object__open(filename);
	}

	if (IS_ERR_OR_NULL(obj)) {
		pr_debug("bpf: failed to load %s\n", filename);
		return obj;
	}

	if (bpf_perf_object__add(obj)) {
		bpf_object__close(obj);
		return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
	}

	return obj;
}
190
191 static void
192 clear_prog_priv(const struct bpf_program *prog __maybe_unused,
193                 void *_priv)
194 {
195         struct bpf_prog_priv *priv = _priv;
196
197         cleanup_perf_probe_events(&priv->pev, 1);
198         zfree(&priv->insns_buf);
199         zfree(&priv->type_mapping);
200         zfree(&priv->sys_name);
201         zfree(&priv->evt_name);
202         free(priv);
203 }
204
205 static void bpf_program_hash_free(void)
206 {
207         struct hashmap_entry *cur;
208         size_t bkt;
209
210         if (IS_ERR_OR_NULL(bpf_program_hash))
211                 return;
212
213         hashmap__for_each_entry(bpf_program_hash, cur, bkt)
214                 clear_prog_priv(cur->key, cur->value);
215
216         hashmap__free(bpf_program_hash);
217         bpf_program_hash = NULL;
218 }
219
220 static void bpf_map_hash_free(void);
221
222 void bpf__clear(void)
223 {
224         struct bpf_perf_object *perf_obj, *tmp;
225
226         bpf_perf_object__for_each(perf_obj, tmp) {
227                 bpf__unprobe(perf_obj->obj);
228                 bpf_perf_object__close(perf_obj);
229         }
230
231         bpf_program_hash_free();
232         bpf_map_hash_free();
233 }
234
/* Identity hash for pointer-keyed hashmaps: the address itself is the hash. */
static size_t ptr_hash(const void *__key, void *ctx __maybe_unused)
{
	return (size_t) __key;
}
239
/* Pointer-keyed hashmaps compare keys by address, not content. */
static bool ptr_equal(const void *key1, const void *key2,
			  void *ctx __maybe_unused)
{
	return key1 == key2;
}
245
246 static void *program_priv(const struct bpf_program *prog)
247 {
248         void *priv;
249
250         if (IS_ERR_OR_NULL(bpf_program_hash))
251                 return NULL;
252         if (!hashmap__find(bpf_program_hash, prog, &priv))
253                 return NULL;
254         return priv;
255 }
256
257 static int program_set_priv(struct bpf_program *prog, void *priv)
258 {
259         void *old_priv;
260
261         /*
262          * Should not happen, we warn about it in the
263          * caller function - config_bpf_program
264          */
265         if (IS_ERR(bpf_program_hash))
266                 return PTR_ERR(bpf_program_hash);
267
268         if (!bpf_program_hash) {
269                 bpf_program_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
270                 if (IS_ERR(bpf_program_hash))
271                         return PTR_ERR(bpf_program_hash);
272         }
273
274         old_priv = program_priv(prog);
275         if (old_priv) {
276                 clear_prog_priv(prog, old_priv);
277                 return hashmap__set(bpf_program_hash, prog, priv, NULL, NULL);
278         }
279         return hashmap__add(bpf_program_hash, prog, priv);
280 }
281
282 static int
283 prog_config__exec(const char *value, struct perf_probe_event *pev)
284 {
285         pev->uprobes = true;
286         pev->target = strdup(value);
287         if (!pev->target)
288                 return -ENOMEM;
289         return 0;
290 }
291
292 static int
293 prog_config__module(const char *value, struct perf_probe_event *pev)
294 {
295         pev->uprobes = false;
296         pev->target = strdup(value);
297         if (!pev->target)
298                 return -ENOMEM;
299         return 0;
300 }
301
/* Parse a yes/no @value into *@pbool, optionally inverting the result. */
static int
prog_config__bool(const char *value, bool *pbool, bool invert)
{
	bool parsed;
	int ret;

	if (!pbool)
		return -EINVAL;

	ret = strtobool(value, &parsed);
	if (ret)
		return ret;

	*pbool = invert ? !parsed : parsed;
	return 0;
}
318
/* "inlines=[yes|no]": yes allows probing inlined symbols (inverted flag). */
static int
prog_config__inlines(const char *value,
		     struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.no_inlines, true);
}
325
/* "force=[yes|no]": yes forcibly adds events even when the name exists. */
static int
prog_config__force(const char *value,
		   struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.force_add, false);
}
332
/*
 * Recognized "key=value;" terms in a BPF program's section name.
 * The trailing spaces in .usage align the hint output columns.
 */
static struct {
	const char *key;
	const char *usage;
	const char *desc;
	int (*func)(const char *, struct perf_probe_event *);
} bpf_prog_config_terms[] = {
	{
		.key    = "exec",
		.usage  = "exec=<full path of file>",
		.desc   = "Set uprobe target",
		.func   = prog_config__exec,
	},
	{
		.key    = "module",
		.usage  = "module=<module name>    ",
		.desc   = "Set kprobe module",
		.func   = prog_config__module,
	},
	{
		.key    = "inlines",
		.usage  = "inlines=[yes|no]        ",
		.desc   = "Probe at inline symbol",
		.func   = prog_config__inlines,
	},
	{
		.key    = "force",
		.usage  = "force=[yes|no]          ",
		.desc   = "Forcibly add events with existing name",
		.func   = prog_config__force,
	},
};
364
365 static int
366 do_prog_config(const char *key, const char *value,
367                struct perf_probe_event *pev)
368 {
369         unsigned int i;
370
371         pr_debug("config bpf program: %s=%s\n", key, value);
372         for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
373                 if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
374                         return bpf_prog_config_terms[i].func(value, pev);
375
376         pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
377                  key, value);
378
379         pr_debug("\nHint: Valid options are:\n");
380         for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
381                 pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
382                          bpf_prog_config_terms[i].desc);
383         pr_debug("\n");
384
385         return -BPF_LOADER_ERRNO__PROGCONF_TERM;
386 }
387
/*
 * Consume all leading "key=value;" pairs in @config_str, applying each
 * via do_prog_config(). Returns a pointer into @config_str just past the
 * last consumed pair (the "main" probe/tracepoint part), or ERR_PTR()
 * on failure.
 */
static const char *
parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
{
	char *text = strdup(config_str);
	char *sep, *line;
	const char *main_str = NULL;
	int err = 0;

	if (!text) {
		pr_debug("Not enough memory: dup config_str failed\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Tokenize the mutable copy; config_str itself stays untouched. */
	line = text;
	while ((sep = strchr(line, ';'))) {
		char *equ;

		*sep = '\0';
		equ = strchr(line, '=');
		if (!equ) {
			pr_warning("WARNING: invalid config in BPF object: %s\n",
				   line);
			pr_warning("\tShould be 'key=value'.\n");
			goto nextline;
		}
		*equ = '\0';

		err = do_prog_config(line, equ + 1, pev);
		if (err)
			break;
nextline:
		line = sep + 1;
	}

	if (!err)
		/* Translate the offset in the copy back into config_str. */
		main_str = config_str + (line - text);
	free(text);

	return err ? ERR_PTR(err) : main_str;
}
428
429 static int
430 parse_prog_config(const char *config_str, const char **p_main_str,
431                   bool *is_tp, struct perf_probe_event *pev)
432 {
433         int err;
434         const char *main_str = parse_prog_config_kvpair(config_str, pev);
435
436         if (IS_ERR(main_str))
437                 return PTR_ERR(main_str);
438
439         *p_main_str = main_str;
440         if (!strchr(main_str, '=')) {
441                 /* Is a tracepoint event? */
442                 const char *s = strchr(main_str, ':');
443
444                 if (!s) {
445                         pr_debug("bpf: '%s' is not a valid tracepoint\n",
446                                  config_str);
447                         return -BPF_LOADER_ERRNO__CONFIG;
448                 }
449
450                 *is_tp = true;
451                 return 0;
452         }
453
454         *is_tp = false;
455         err = parse_perf_probe_command(main_str, pev);
456         if (err < 0) {
457                 pr_debug("bpf: '%s' is not a valid config string\n",
458                          config_str);
459                 /* parse failed, don't need clear pev. */
460                 return -BPF_LOADER_ERRNO__CONFIG;
461         }
462         return 0;
463 }
464
465 static int
466 config_bpf_program(struct bpf_program *prog)
467 {
468         struct perf_probe_event *pev = NULL;
469         struct bpf_prog_priv *priv = NULL;
470         const char *config_str, *main_str;
471         bool is_tp = false;
472         int err;
473
474         /* Initialize per-program probing setting */
475         probe_conf.no_inlines = false;
476         probe_conf.force_add = false;
477
478         priv = calloc(sizeof(*priv), 1);
479         if (!priv) {
480                 pr_debug("bpf: failed to alloc priv\n");
481                 return -ENOMEM;
482         }
483         pev = &priv->pev;
484
485         config_str = bpf_program__section_name(prog);
486         pr_debug("bpf: config program '%s'\n", config_str);
487         err = parse_prog_config(config_str, &main_str, &is_tp, pev);
488         if (err)
489                 goto errout;
490
491         if (is_tp) {
492                 char *s = strchr(main_str, ':');
493
494                 priv->is_tp = true;
495                 priv->sys_name = strndup(main_str, s - main_str);
496                 priv->evt_name = strdup(s + 1);
497                 goto set_priv;
498         }
499
500         if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
501                 pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
502                          config_str, PERF_BPF_PROBE_GROUP);
503                 err = -BPF_LOADER_ERRNO__GROUP;
504                 goto errout;
505         } else if (!pev->group)
506                 pev->group = strdup(PERF_BPF_PROBE_GROUP);
507
508         if (!pev->group) {
509                 pr_debug("bpf: strdup failed\n");
510                 err = -ENOMEM;
511                 goto errout;
512         }
513
514         if (!pev->event) {
515                 pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
516                          config_str);
517                 err = -BPF_LOADER_ERRNO__EVENTNAME;
518                 goto errout;
519         }
520         pr_debug("bpf: config '%s' is ok\n", config_str);
521
522 set_priv:
523         err = program_set_priv(prog, priv);
524         if (err) {
525                 pr_debug("Failed to set priv for program '%s'\n", config_str);
526                 goto errout;
527         }
528
529         return 0;
530
531 errout:
532         if (pev)
533                 clear_perf_probe_event(pev);
534         free(priv);
535         return err;
536 }
537
/* One-shot initialization of probe symbol maps; the result is cached. */
static int bpf__prepare_probe(void)
{
	static int err = 0;
	static bool initialized = false;

	/*
	 * Make err static, so if init failed the first, bpf__prepare_probe()
	 * fails each time without calling init_probe_symbol_maps multiple
	 * times.
	 */
	if (initialized)
		return err;

	initialized = true;
	err = init_probe_symbol_maps(false);
	if (err < 0)
		pr_debug("Failed to init_probe_symbol_maps\n");
	probe_conf.max_probes = MAX_PROBES;
	return err;
}
558
/*
 * libbpf preprocessor callback: for prologue type @n, emit the argument
 * fetcher instructions into priv->insns_buf, append the original program
 * body, and report the combined instruction stream via @res.
 */
static int
preproc_gen_prologue(struct bpf_program *prog, int n,
		     struct bpf_insn *orig_insns, int orig_insns_cnt,
		     struct bpf_prog_prep_result *res)
{
	struct bpf_prog_priv *priv = program_priv(prog);
	struct probe_trace_event *tev;
	struct perf_probe_event *pev;
	struct bpf_insn *buf;
	size_t prologue_cnt = 0;
	int i, err;

	/* Tracepoint programs never get a generated prologue. */
	if (IS_ERR_OR_NULL(priv) || priv->is_tp)
		goto errout;

	pev = &priv->pev;

	if (n < 0 || n >= priv->nr_types)
		goto errout;

	/* Find a tev belongs to that type */
	for (i = 0; i < pev->ntevs; i++) {
		if (priv->type_mapping[i] == n)
			break;
	}

	if (i >= pev->ntevs) {
		pr_debug("Internal error: prologue type %d not found\n", n);
		return -BPF_LOADER_ERRNO__PROLOGUE;
	}

	tev = &pev->tevs[i];

	buf = priv->insns_buf;
	err = bpf__gen_prologue(tev->args, tev->nargs,
				buf, &prologue_cnt,
				BPF_MAXINSNS - orig_insns_cnt);
	if (err) {
		const char *title;

		title = bpf_program__section_name(prog);
		pr_debug("Failed to generate prologue for program %s\n",
			 title);
		return err;
	}

	/* Original instructions follow the prologue in the scratch buffer. */
	memcpy(&buf[prologue_cnt], orig_insns,
	       sizeof(struct bpf_insn) * orig_insns_cnt);

	res->new_insn_ptr = buf;
	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
	res->pfd = NULL;
	return 0;

errout:
	pr_debug("Internal error in preproc_gen_prologue\n");
	return -BPF_LOADER_ERRNO__PROLOGUE;
}
617
/*
 * Ordering used to group probe_trace_events needing the same prologue:
 * two events compare equal iff they have the same argument count and
 * each argument has the same value string and reference-offset chain.
 * compare_tev_args is reflexive, transitive and antisymmetric.
 * I can prove it but this margin is too narrow to contain.
 */
static int compare_tev_args(const void *ptev1, const void *ptev2)
{
	int i, ret;
	const struct probe_trace_event *tev1 =
		*(const struct probe_trace_event **)ptev1;
	const struct probe_trace_event *tev2 =
		*(const struct probe_trace_event **)ptev2;

	/* More arguments sorts first (descending nargs). */
	ret = tev2->nargs - tev1->nargs;
	if (ret)
		return ret;

	for (i = 0; i < tev1->nargs; i++) {
		struct probe_trace_arg *arg1, *arg2;
		struct probe_trace_arg_ref *ref1, *ref2;

		arg1 = &tev1->args[i];
		arg2 = &tev2->args[i];

		ret = strcmp(arg1->value, arg2->value);
		if (ret)
			return ret;

		/* Walk both dereference chains in lockstep. */
		ref1 = arg1->ref;
		ref2 = arg2->ref;

		while (ref1 && ref2) {
			ret = ref2->offset - ref1->offset;
			if (ret)
				return ret;

			ref1 = ref1->next;
			ref2 = ref2->next;
		}

		/* The shorter chain sorts after the longer one. */
		if (ref1 || ref2)
			return ref2 ? 1 : -1;
	}

	return 0;
}
663
/*
 * Assign a type number to each tevs in a pev.
 * mapping is an array with same slots as tevs in that pev.
 * nr_types will be set to number of types.
 */
static int map_prologue(struct perf_probe_event *pev, int *mapping,
			int *nr_types)
{
	int i, type = 0;
	struct probe_trace_event **ptevs;

	size_t array_sz = sizeof(*ptevs) * pev->ntevs;

	ptevs = malloc(array_sz);
	if (!ptevs) {
		pr_debug("Not enough memory: alloc ptevs failed\n");
		return -ENOMEM;
	}

	/* Sort an array of pointers so the original tevs keep their order. */
	pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
	for (i = 0; i < pev->ntevs; i++)
		ptevs[i] = &pev->tevs[i];

	qsort(ptevs, pev->ntevs, sizeof(*ptevs),
	      compare_tev_args);

	/* Equivalent tevs are now adjacent: one pass assigns type numbers. */
	for (i = 0; i < pev->ntevs; i++) {
		int n;

		/* Pointer difference recovers the tev's original index. */
		n = ptevs[i] - pev->tevs;
		if (i == 0) {
			mapping[n] = type;
			pr_debug("mapping[%d]=%d\n", n, type);
			continue;
		}

		if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
			mapping[n] = type;
		else
			mapping[n] = ++type;

		pr_debug("mapping[%d]=%d\n", n, mapping[n]);
	}
	free(ptevs);
	*nr_types = type + 1;

	return 0;
}
712
/*
 * Decide whether @prog needs a generated prologue (i.e. any of its tevs
 * fetches arguments); if so, allocate the scratch buffers, build the
 * tev->type mapping and register preproc_gen_prologue() with libbpf.
 */
static int hook_load_preprocessor(struct bpf_program *prog)
{
	struct bpf_prog_priv *priv = program_priv(prog);
	struct perf_probe_event *pev;
	bool need_prologue = false;
	int err, i;

	if (IS_ERR_OR_NULL(priv)) {
		pr_debug("Internal error when hook preprocessor\n");
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	/* Tracepoints carry no probe arguments, hence no prologue. */
	if (priv->is_tp) {
		priv->need_prologue = false;
		return 0;
	}

	pev = &priv->pev;
	for (i = 0; i < pev->ntevs; i++) {
		struct probe_trace_event *tev = &pev->tevs[i];

		if (tev->nargs > 0) {
			need_prologue = true;
			break;
		}
	}

	/*
	 * Since all tevs don't have argument, we don't need generate
	 * prologue.
	 */
	if (!need_prologue) {
		priv->need_prologue = false;
		return 0;
	}

	priv->need_prologue = true;
	priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
	if (!priv->insns_buf) {
		pr_debug("Not enough memory: alloc insns_buf failed\n");
		return -ENOMEM;
	}

	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
	if (!priv->type_mapping) {
		pr_debug("Not enough memory: alloc type_mapping failed\n");
		return -ENOMEM;
	}
	/* -1 marks "no type assigned yet" for every slot. */
	memset(priv->type_mapping, -1,
	       sizeof(int) * pev->ntevs);

	err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
	if (err)
		return err;

	err = bpf_program__set_prep(prog, priv->nr_types,
				    preproc_gen_prologue);
	return err;
}
772
/*
 * Configure every program in @obj: parse its section name, register the
 * corresponding kprobes/uprobes (tracepoints need no probe setup) and
 * hook the prologue-generating preprocessor where arguments are fetched.
 */
int bpf__probe(struct bpf_object *obj)
{
	int err = 0;
	struct bpf_program *prog;
	struct bpf_prog_priv *priv;
	struct perf_probe_event *pev;

	err = bpf__prepare_probe();
	if (err) {
		pr_debug("bpf__prepare_probe failed\n");
		return err;
	}

	bpf_object__for_each_program(prog, obj) {
		err = config_bpf_program(prog);
		if (err)
			goto out;

		priv = program_priv(prog);
		if (IS_ERR_OR_NULL(priv)) {
			if (!priv)
				err = -BPF_LOADER_ERRNO__INTERNAL;
			else
				err = PTR_ERR(priv);
			goto out;
		}

		if (priv->is_tp) {
			bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
			continue;
		}

		bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
		pev = &priv->pev;

		err = convert_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to convert perf probe events\n");
			goto out;
		}

		err = apply_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to apply perf probe events\n");
			goto out;
		}

		/*
		 * After probing, let's consider prologue, which
		 * adds program fetcher to BPF programs.
		 *
		 * hook_load_preprocessor() hooks pre-processor
		 * to bpf_program, let it generate prologue
		 * dynamically during loading.
		 */
		err = hook_load_preprocessor(prog);
		if (err)
			goto out;
	}
out:
	/* Positive values (e.g. applied-event counts) mean success. */
	return err < 0 ? err : 0;
}
835
#define EVENTS_WRITE_BUFSIZE  4096
/*
 * Remove every kprobe/uprobe registered by @obj's programs. Failures
 * are remembered but do not stop the loop, so as many events as
 * possible get deleted; the last error (if any) is returned.
 */
int bpf__unprobe(struct bpf_object *obj)
{
	int err, ret = 0;
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = program_priv(prog);
		int i;

		/* Tracepoint programs registered no probes. */
		if (IS_ERR_OR_NULL(priv) || priv->is_tp)
			continue;

		for (i = 0; i < priv->pev.ntevs; i++) {
			struct probe_trace_event *tev = &priv->pev.tevs[i];
			char name_buf[EVENTS_WRITE_BUFSIZE];
			struct strfilter *delfilter;

			snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
				 "%s:%s", tev->group, tev->event);
			/* Defensive: snprintf already NUL-terminates. */
			name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';

			delfilter = strfilter__new(name_buf, NULL);
			if (!delfilter) {
				pr_debug("Failed to create filter for unprobing\n");
				ret = -ENOMEM;
				continue;
			}

			err = del_perf_probe_events(delfilter);
			strfilter__delete(delfilter);
			if (err) {
				pr_debug("Failed to delete %s\n", name_buf);
				ret = err;
				continue;
			}
		}
	}
	return ret;
}
876
/* Load @obj into the kernel, logging a decoded error string on failure. */
int bpf__load(struct bpf_object *obj)
{
	int err = bpf_object__load(obj);

	if (err) {
		char bf[128];

		libbpf_strerror(err, bf, sizeof(bf));
		pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
	}
	return err;
}
890
/*
 * Invoke @func once per event attached to @obj's programs: once per
 * tracepoint program, and once per probe_trace_event — using the fd of
 * the matching prologue variant when one was generated. Iteration stops
 * at the first callback or lookup error.
 */
int bpf__foreach_event(struct bpf_object *obj,
		       bpf_prog_iter_callback_t func,
		       void *arg)
{
	struct bpf_program *prog;
	int err;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = program_priv(prog);
		struct probe_trace_event *tev;
		struct perf_probe_event *pev;
		int i, fd;

		if (IS_ERR_OR_NULL(priv)) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		if (priv->is_tp) {
			fd = bpf_program__fd(prog);
			err = (*func)(priv->sys_name, priv->evt_name, fd, obj, arg);
			if (err) {
				pr_debug("bpf: tracepoint call back failed, stop iterate\n");
				return err;
			}
			continue;
		}

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
			tev = &pev->tevs[i];

			/* With a prologue, each type has its own fd. */
			if (priv->need_prologue) {
				int type = priv->type_mapping[i];

				fd = bpf_program__nth_fd(prog, type);
			} else {
				fd = bpf_program__fd(prog);
			}

			if (fd < 0) {
				pr_debug("bpf: failed to get file descriptor\n");
				return fd;
			}

			err = (*func)(tev->group, tev->event, fd, obj, arg);
			if (err) {
				pr_debug("bpf: call back failed, stop iterate\n");
				return err;
			}
		}
	}
	return 0;
}
945
/* What a deferred map operation writes into the map. */
enum bpf_map_op_type {
	BPF_MAP_OP_SET_VALUE,
	BPF_MAP_OP_SET_EVSEL,
};
950
/* Which keys a deferred map operation applies to. */
enum bpf_map_key_type {
	BPF_MAP_KEY_ALL,
	BPF_MAP_KEY_RANGES,
};
955
/*
 * One deferred operation on a BPF map, queued on bpf_map_priv::ops_list.
 * The active union members are selected by key_type and op_type.
 */
struct bpf_map_op {
	struct list_head list;
	enum bpf_map_op_type op_type;
	enum bpf_map_key_type key_type;
	union {
		/* valid when key_type == BPF_MAP_KEY_RANGES */
		struct parse_events_array array;
	} k;
	union {
		u64 value;		/* BPF_MAP_OP_SET_VALUE */
		struct evsel *evsel;	/* BPF_MAP_OP_SET_EVSEL */
	} v;
};
968
/* Per-map private data kept in bpf_map_hash: the queue of pending ops. */
struct bpf_map_priv {
	struct list_head ops_list;	/* list of struct bpf_map_op */
};
972
973 static void
974 bpf_map_op__delete(struct bpf_map_op *op)
975 {
976         if (!list_empty(&op->list))
977                 list_del_init(&op->list);
978         if (op->key_type == BPF_MAP_KEY_RANGES)
979                 parse_events__clear_array(&op->k.array);
980         free(op);
981 }
982
983 static void
984 bpf_map_priv__purge(struct bpf_map_priv *priv)
985 {
986         struct bpf_map_op *pos, *n;
987
988         list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
989                 list_del_init(&pos->list);
990                 bpf_map_op__delete(pos);
991         }
992 }
993
994 static void
995 bpf_map_priv__clear(const struct bpf_map *map __maybe_unused,
996                     void *_priv)
997 {
998         struct bpf_map_priv *priv = _priv;
999
1000         bpf_map_priv__purge(priv);
1001         free(priv);
1002 }
1003
1004 static void *map_priv(const struct bpf_map *map)
1005 {
1006         void *priv;
1007
1008         if (IS_ERR_OR_NULL(bpf_map_hash))
1009                 return NULL;
1010         if (!hashmap__find(bpf_map_hash, map, &priv))
1011                 return NULL;
1012         return priv;
1013 }
1014
1015 static void bpf_map_hash_free(void)
1016 {
1017         struct hashmap_entry *cur;
1018         size_t bkt;
1019
1020         if (IS_ERR_OR_NULL(bpf_map_hash))
1021                 return;
1022
1023         hashmap__for_each_entry(bpf_map_hash, cur, bkt)
1024                 bpf_map_priv__clear(cur->key, cur->value);
1025
1026         hashmap__free(bpf_map_hash);
1027         bpf_map_hash = NULL;
1028 }
1029
1030 static int map_set_priv(struct bpf_map *map, void *priv)
1031 {
1032         void *old_priv;
1033
1034         if (WARN_ON_ONCE(IS_ERR(bpf_map_hash)))
1035                 return PTR_ERR(bpf_program_hash);
1036
1037         if (!bpf_map_hash) {
1038                 bpf_map_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
1039                 if (IS_ERR(bpf_map_hash))
1040                         return PTR_ERR(bpf_map_hash);
1041         }
1042
1043         old_priv = map_priv(map);
1044         if (old_priv) {
1045                 bpf_map_priv__clear(map, old_priv);
1046                 return hashmap__set(bpf_map_hash, map, priv, NULL, NULL);
1047         }
1048         return hashmap__add(bpf_map_hash, map, priv);
1049 }
1050
1051 static int
1052 bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
1053 {
1054         op->key_type = BPF_MAP_KEY_ALL;
1055         if (!term)
1056                 return 0;
1057
1058         if (term->array.nr_ranges) {
1059                 size_t memsz = term->array.nr_ranges *
1060                                 sizeof(op->k.array.ranges[0]);
1061
1062                 op->k.array.ranges = memdup(term->array.ranges, memsz);
1063                 if (!op->k.array.ranges) {
1064                         pr_debug("Not enough memory to alloc indices for map\n");
1065                         return -ENOMEM;
1066                 }
1067                 op->key_type = BPF_MAP_KEY_RANGES;
1068                 op->k.array.nr_ranges = term->array.nr_ranges;
1069         }
1070         return 0;
1071 }
1072
1073 static struct bpf_map_op *
1074 bpf_map_op__new(struct parse_events_term *term)
1075 {
1076         struct bpf_map_op *op;
1077         int err;
1078
1079         op = zalloc(sizeof(*op));
1080         if (!op) {
1081                 pr_debug("Failed to alloc bpf_map_op\n");
1082                 return ERR_PTR(-ENOMEM);
1083         }
1084         INIT_LIST_HEAD(&op->list);
1085
1086         err = bpf_map_op_setkey(op, term);
1087         if (err) {
1088                 free(op);
1089                 return ERR_PTR(err);
1090         }
1091         return op;
1092 }
1093
1094 static struct bpf_map_op *
1095 bpf_map_op__clone(struct bpf_map_op *op)
1096 {
1097         struct bpf_map_op *newop;
1098
1099         newop = memdup(op, sizeof(*op));
1100         if (!newop) {
1101                 pr_debug("Failed to alloc bpf_map_op\n");
1102                 return NULL;
1103         }
1104
1105         INIT_LIST_HEAD(&newop->list);
1106         if (op->key_type == BPF_MAP_KEY_RANGES) {
1107                 size_t memsz = op->k.array.nr_ranges *
1108                                sizeof(op->k.array.ranges[0]);
1109
1110                 newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
1111                 if (!newop->k.array.ranges) {
1112                         pr_debug("Failed to alloc indices for map\n");
1113                         free(newop);
1114                         return NULL;
1115                 }
1116         }
1117
1118         return newop;
1119 }
1120
1121 static struct bpf_map_priv *
1122 bpf_map_priv__clone(struct bpf_map_priv *priv)
1123 {
1124         struct bpf_map_priv *newpriv;
1125         struct bpf_map_op *pos, *newop;
1126
1127         newpriv = zalloc(sizeof(*newpriv));
1128         if (!newpriv) {
1129                 pr_debug("Not enough memory to alloc map private\n");
1130                 return NULL;
1131         }
1132         INIT_LIST_HEAD(&newpriv->ops_list);
1133
1134         list_for_each_entry(pos, &priv->ops_list, list) {
1135                 newop = bpf_map_op__clone(pos);
1136                 if (!newop) {
1137                         bpf_map_priv__purge(newpriv);
1138                         return NULL;
1139                 }
1140                 list_add_tail(&newop->list, &newpriv->ops_list);
1141         }
1142
1143         return newpriv;
1144 }
1145
1146 static int
1147 bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
1148 {
1149         const char *map_name = bpf_map__name(map);
1150         struct bpf_map_priv *priv = map_priv(map);
1151
1152         if (IS_ERR(priv)) {
1153                 pr_debug("Failed to get private from map %s\n", map_name);
1154                 return PTR_ERR(priv);
1155         }
1156
1157         if (!priv) {
1158                 priv = zalloc(sizeof(*priv));
1159                 if (!priv) {
1160                         pr_debug("Not enough memory to alloc map private\n");
1161                         return -ENOMEM;
1162                 }
1163                 INIT_LIST_HEAD(&priv->ops_list);
1164
1165                 if (map_set_priv(map, priv)) {
1166                         free(priv);
1167                         return -BPF_LOADER_ERRNO__INTERNAL;
1168                 }
1169         }
1170
1171         list_add_tail(&op->list, &priv->ops_list);
1172         return 0;
1173 }
1174
/*
 * Convenience helper: build a new op from @term and queue it on @map.
 * Returns the queued op (caller may still fill op_type/value), or an
 * ERR_PTR on failure; the op is destroyed if queueing fails.
 */
static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
{
	struct bpf_map_op *op = bpf_map_op__new(term);
	int ret;

	if (IS_ERR(op))
		return op;

	ret = bpf_map__add_op(map, op);
	if (!ret)
		return op;

	bpf_map_op__delete(op);
	return ERR_PTR(ret);
}
1192
1193 static int
1194 __bpf_map__config_value(struct bpf_map *map,
1195                         struct parse_events_term *term)
1196 {
1197         struct bpf_map_op *op;
1198         const char *map_name = bpf_map__name(map);
1199
1200         if (!map) {
1201                 pr_debug("Map '%s' is invalid\n", map_name);
1202                 return -BPF_LOADER_ERRNO__INTERNAL;
1203         }
1204
1205         if (bpf_map__type(map) != BPF_MAP_TYPE_ARRAY) {
1206                 pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
1207                          map_name);
1208                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1209         }
1210         if (bpf_map__key_size(map) < sizeof(unsigned int)) {
1211                 pr_debug("Map %s has incorrect key size\n", map_name);
1212                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
1213         }
1214         switch (bpf_map__value_size(map)) {
1215         case 1:
1216         case 2:
1217         case 4:
1218         case 8:
1219                 break;
1220         default:
1221                 pr_debug("Map %s has incorrect value size\n", map_name);
1222                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1223         }
1224
1225         op = bpf_map__add_newop(map, term);
1226         if (IS_ERR(op))
1227                 return PTR_ERR(op);
1228         op->op_type = BPF_MAP_OP_SET_VALUE;
1229         op->v.value = term->val.num;
1230         return 0;
1231 }
1232
1233 static int
1234 bpf_map__config_value(struct bpf_map *map,
1235                       struct parse_events_term *term,
1236                       struct evlist *evlist __maybe_unused)
1237 {
1238         if (!term->err_val) {
1239                 pr_debug("Config value not set\n");
1240                 return -BPF_LOADER_ERRNO__OBJCONF_CONF;
1241         }
1242
1243         if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
1244                 pr_debug("ERROR: wrong value type for 'value'\n");
1245                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
1246         }
1247
1248         return __bpf_map__config_value(map, term);
1249 }
1250
1251 static int
1252 __bpf_map__config_event(struct bpf_map *map,
1253                         struct parse_events_term *term,
1254                         struct evlist *evlist)
1255 {
1256         struct bpf_map_op *op;
1257         const char *map_name = bpf_map__name(map);
1258         struct evsel *evsel = evlist__find_evsel_by_str(evlist, term->val.str);
1259
1260         if (!evsel) {
1261                 pr_debug("Event (for '%s') '%s' doesn't exist\n",
1262                          map_name, term->val.str);
1263                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
1264         }
1265
1266         if (!map) {
1267                 pr_debug("Map '%s' is invalid\n", map_name);
1268                 return PTR_ERR(map);
1269         }
1270
1271         /*
1272          * No need to check key_size and value_size:
1273          * kernel has already checked them.
1274          */
1275         if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
1276                 pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
1277                          map_name);
1278                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1279         }
1280
1281         op = bpf_map__add_newop(map, term);
1282         if (IS_ERR(op))
1283                 return PTR_ERR(op);
1284         op->op_type = BPF_MAP_OP_SET_EVSEL;
1285         op->v.evsel = evsel;
1286         return 0;
1287 }
1288
1289 static int
1290 bpf_map__config_event(struct bpf_map *map,
1291                       struct parse_events_term *term,
1292                       struct evlist *evlist)
1293 {
1294         if (!term->err_val) {
1295                 pr_debug("Config value not set\n");
1296                 return -BPF_LOADER_ERRNO__OBJCONF_CONF;
1297         }
1298
1299         if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
1300                 pr_debug("ERROR: wrong value type for 'event'\n");
1301                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
1302         }
1303
1304         return __bpf_map__config_event(map, term, evlist);
1305 }
1306
/* Dispatch entry mapping a "map:<name>.<opt>" option name to its handler. */
struct bpf_obj_config__map_func {
	const char *config_opt;		/* option keyword, e.g. "value" */
	int (*config_func)(struct bpf_map *, struct parse_events_term *,
			   struct evlist *);
};
1312
/* Supported map config options, scanned linearly by bpf__obj_config_map(). */
struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
	{"value", bpf_map__config_value},
	{"event", bpf_map__config_event},
};
1317
1318 static int
1319 config_map_indices_range_check(struct parse_events_term *term,
1320                                struct bpf_map *map,
1321                                const char *map_name)
1322 {
1323         struct parse_events_array *array = &term->array;
1324         unsigned int i;
1325
1326         if (!array->nr_ranges)
1327                 return 0;
1328         if (!array->ranges) {
1329                 pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
1330                          map_name, (int)array->nr_ranges);
1331                 return -BPF_LOADER_ERRNO__INTERNAL;
1332         }
1333
1334         if (!map) {
1335                 pr_debug("Map '%s' is invalid\n", map_name);
1336                 return -BPF_LOADER_ERRNO__INTERNAL;
1337         }
1338
1339         for (i = 0; i < array->nr_ranges; i++) {
1340                 unsigned int start = array->ranges[i].start;
1341                 size_t length = array->ranges[i].length;
1342                 unsigned int idx = start + length - 1;
1343
1344                 if (idx >= bpf_map__max_entries(map)) {
1345                         pr_debug("ERROR: index %d too large\n", idx);
1346                         return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
1347                 }
1348         }
1349         return 0;
1350 }
1351
/*
 * Parse and apply one "map:<mapname>.<config opt>" term against @obj.
 *
 * Splits the config key at the first '.', looks the map up by name, then
 * dispatches to the matching bpf_obj_config__map_funcs handler.
 * @key_scan_pos tracks how far into the key parsing got, for caller error
 * reporting; it is temporarily advanced past the option while the index
 * ranges are checked, and advanced again on full success.
 */
static int
bpf__obj_config_map(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct evlist *evlist,
		    int *key_scan_pos)
{
	/* key is "map:<mapname>.<config opt>" */
	char *map_name = strdup(term->config + sizeof("map:") - 1);
	struct bpf_map *map;
	int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
	char *map_opt;
	size_t i;

	if (!map_name)
		return -ENOMEM;

	map_opt = strchr(map_name, '.');
	if (!map_opt) {
		pr_debug("ERROR: Invalid map config: %s\n", map_name);
		goto out;
	}

	/* terminate the map name and step to the option text */
	*map_opt++ = '\0';
	if (*map_opt == '\0') {
		pr_debug("ERROR: Invalid map option: %s\n", term->config);
		goto out;
	}

	map = bpf_object__find_map_by_name(obj, map_name);
	if (!map) {
		pr_debug("ERROR: Map %s doesn't exist\n", map_name);
		err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
		goto out;
	}

	/* point past the option while validating the index ranges */
	*key_scan_pos += strlen(map_opt);
	err = config_map_indices_range_check(term, map, map_name);
	if (err)
		goto out;
	*key_scan_pos -= strlen(map_opt);

	for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
		struct bpf_obj_config__map_func *func =
				&bpf_obj_config__map_funcs[i];

		if (strcmp(map_opt, func->config_opt) == 0) {
			err = func->config_func(map, term, evlist);
			goto out;
		}
	}

	pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
	err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
	/* on success, report the scan position as past the whole option */
	if (!err)
		*key_scan_pos += strlen(map_opt);

	free(map_name);
	return err;
}
1412
1413 int bpf__config_obj(struct bpf_object *obj,
1414                     struct parse_events_term *term,
1415                     struct evlist *evlist,
1416                     int *error_pos)
1417 {
1418         int key_scan_pos = 0;
1419         int err;
1420
1421         if (!obj || !term || !term->config)
1422                 return -EINVAL;
1423
1424         if (strstarts(term->config, "map:")) {
1425                 key_scan_pos = sizeof("map:") - 1;
1426                 err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
1427                 goto out;
1428         }
1429         err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
1430 out:
1431         if (error_pos)
1432                 *error_pos = key_scan_pos;
1433         return err;
1434
1435 }
1436
/*
 * Callback invoked once per map key by the foreach_key_* iterators;
 * @pkey points at the current key, @arg is caller context.
 */
typedef int (*map_config_func_t)(const char *name, int map_fd,
				 const struct bpf_map *map,
				 struct bpf_map_op *op,
				 void *pkey, void *arg);
1441
1442 static int
1443 foreach_key_array_all(map_config_func_t func,
1444                       void *arg, const char *name,
1445                       int map_fd, const struct bpf_map *map,
1446                       struct bpf_map_op *op)
1447 {
1448         unsigned int i;
1449         int err;
1450
1451         for (i = 0; i < bpf_map__max_entries(map); i++) {
1452                 err = func(name, map_fd, map, op, &i, arg);
1453                 if (err) {
1454                         pr_debug("ERROR: failed to insert value to %s[%u]\n",
1455                                  name, i);
1456                         return err;
1457                 }
1458         }
1459         return 0;
1460 }
1461
1462 static int
1463 foreach_key_array_ranges(map_config_func_t func, void *arg,
1464                          const char *name, int map_fd,
1465                          const struct bpf_map *map,
1466                          struct bpf_map_op *op)
1467 {
1468         unsigned int i, j;
1469         int err;
1470
1471         for (i = 0; i < op->k.array.nr_ranges; i++) {
1472                 unsigned int start = op->k.array.ranges[i].start;
1473                 size_t length = op->k.array.ranges[i].length;
1474
1475                 for (j = 0; j < length; j++) {
1476                         unsigned int idx = start + j;
1477
1478                         err = func(name, map_fd, map, op, &idx, arg);
1479                         if (err) {
1480                                 pr_debug("ERROR: failed to insert value to %s[%u]\n",
1481                                          name, idx);
1482                                 return err;
1483                         }
1484                 }
1485         }
1486         return 0;
1487 }
1488
1489 static int
1490 bpf_map_config_foreach_key(struct bpf_map *map,
1491                            map_config_func_t func,
1492                            void *arg)
1493 {
1494         int err, map_fd, type;
1495         struct bpf_map_op *op;
1496         const char *name = bpf_map__name(map);
1497         struct bpf_map_priv *priv = map_priv(map);
1498
1499         if (IS_ERR(priv)) {
1500                 pr_debug("ERROR: failed to get private from map %s\n", name);
1501                 return -BPF_LOADER_ERRNO__INTERNAL;
1502         }
1503         if (!priv || list_empty(&priv->ops_list)) {
1504                 pr_debug("INFO: nothing to config for map %s\n", name);
1505                 return 0;
1506         }
1507
1508         if (!map) {
1509                 pr_debug("Map '%s' is invalid\n", name);
1510                 return -BPF_LOADER_ERRNO__INTERNAL;
1511         }
1512         map_fd = bpf_map__fd(map);
1513         if (map_fd < 0) {
1514                 pr_debug("ERROR: failed to get fd from map %s\n", name);
1515                 return map_fd;
1516         }
1517
1518         type = bpf_map__type(map);
1519         list_for_each_entry(op, &priv->ops_list, list) {
1520                 switch (type) {
1521                 case BPF_MAP_TYPE_ARRAY:
1522                 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1523                         switch (op->key_type) {
1524                         case BPF_MAP_KEY_ALL:
1525                                 err = foreach_key_array_all(func, arg, name,
1526                                                             map_fd, map, op);
1527                                 break;
1528                         case BPF_MAP_KEY_RANGES:
1529                                 err = foreach_key_array_ranges(func, arg, name,
1530                                                                map_fd, map, op);
1531                                 break;
1532                         default:
1533                                 pr_debug("ERROR: keytype for map '%s' invalid\n",
1534                                          name);
1535                                 return -BPF_LOADER_ERRNO__INTERNAL;
1536                         }
1537                         if (err)
1538                                 return err;
1539                         break;
1540                 default:
1541                         pr_debug("ERROR: type of '%s' incorrect\n", name);
1542                         return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1543                 }
1544         }
1545
1546         return 0;
1547 }
1548
1549 static int
1550 apply_config_value_for_key(int map_fd, void *pkey,
1551                            size_t val_size, u64 val)
1552 {
1553         int err = 0;
1554
1555         switch (val_size) {
1556         case 1: {
1557                 u8 _val = (u8)(val);
1558                 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1559                 break;
1560         }
1561         case 2: {
1562                 u16 _val = (u16)(val);
1563                 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1564                 break;
1565         }
1566         case 4: {
1567                 u32 _val = (u32)(val);
1568                 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1569                 break;
1570         }
1571         case 8: {
1572                 err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
1573                 break;
1574         }
1575         default:
1576                 pr_debug("ERROR: invalid value size\n");
1577                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1578         }
1579         if (err && errno)
1580                 err = -errno;
1581         return err;
1582 }
1583
/*
 * Store the perf event fd of @evsel into map slot @pkey.
 *
 * The evsel must already be opened (its fd xyarray populated), be
 * one-dimensional, non-inherit, and of a type usable from BPF
 * (bpf-output, raw, or hardware). The key doubles as the index into
 * the evsel's fd array.
 */
static int
apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
			   struct evsel *evsel)
{
	struct xyarray *xy = evsel->core.fd;
	struct perf_event_attr *attr;
	unsigned int key, events;
	bool check_pass = false;
	int *evt_fd;
	int err;

	if (!xy) {
		pr_debug("ERROR: evsel not ready for map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	/* only single-dimension (one cpu x thread cell per row) is supported */
	if (xy->row_size / xy->entry_size != 1) {
		pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
			 name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
	}

	attr = &evsel->core.attr;
	if (attr->inherit) {
		pr_debug("ERROR: Can't put inherit event into map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
	}

	/* accept bpf-output, raw, and hardware events only */
	if (evsel__is_bpf_output(evsel))
		check_pass = true;
	if (attr->type == PERF_TYPE_RAW)
		check_pass = true;
	if (attr->type == PERF_TYPE_HARDWARE)
		check_pass = true;
	if (!check_pass) {
		pr_debug("ERROR: Event type is wrong for map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
	}

	/* the map key indexes into the evsel's per-event fd array */
	events = xy->entries / (xy->row_size / xy->entry_size);
	key = *((unsigned int *)pkey);
	if (key >= events) {
		pr_debug("ERROR: there is no event %d for map %s\n",
			 key, name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
	}
	evt_fd = xyarray__entry(xy, key, 0);
	err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
	if (err && errno)
		err = -errno;
	return err;
}
1636
1637 static int
1638 apply_obj_config_map_for_key(const char *name, int map_fd,
1639                              const struct bpf_map *map,
1640                              struct bpf_map_op *op,
1641                              void *pkey, void *arg __maybe_unused)
1642 {
1643         int err;
1644
1645         switch (op->op_type) {
1646         case BPF_MAP_OP_SET_VALUE:
1647                 err = apply_config_value_for_key(map_fd, pkey,
1648                                                  bpf_map__value_size(map),
1649                                                  op->v.value);
1650                 break;
1651         case BPF_MAP_OP_SET_EVSEL:
1652                 err = apply_config_evsel_for_key(name, map_fd, pkey,
1653                                                  op->v.evsel);
1654                 break;
1655         default:
1656                 pr_debug("ERROR: unknown value type for '%s'\n", name);
1657                 err = -BPF_LOADER_ERRNO__INTERNAL;
1658         }
1659         return err;
1660 }
1661
1662 static int
1663 apply_obj_config_map(struct bpf_map *map)
1664 {
1665         return bpf_map_config_foreach_key(map,
1666                                           apply_obj_config_map_for_key,
1667                                           NULL);
1668 }
1669
1670 static int
1671 apply_obj_config_object(struct bpf_object *obj)
1672 {
1673         struct bpf_map *map;
1674         int err;
1675
1676         bpf_object__for_each_map(map, obj) {
1677                 err = apply_obj_config_map(map);
1678                 if (err)
1679                         return err;
1680         }
1681         return 0;
1682 }
1683
1684 int bpf__apply_obj_config(void)
1685 {
1686         struct bpf_perf_object *perf_obj, *tmp;
1687         int err;
1688
1689         bpf_perf_object__for_each(perf_obj, tmp) {
1690                 err = apply_obj_config_object(perf_obj->obj);
1691                 if (err)
1692                         return err;
1693         }
1694
1695         return 0;
1696 }
1697
/* Iterate every map of every loaded BPF object on the global list. */
#define bpf__perf_for_each_map(map, pobj, tmp)			\
	bpf_perf_object__for_each(pobj, tmp)			\
		bpf_object__for_each_map(map, pobj->obj)

/* Same, but only visit maps whose name equals @name exactly. */
#define bpf__perf_for_each_map_named(map, pobj, pobjtmp, name)	\
	bpf__perf_for_each_map(map, pobj, pobjtmp)		\
		if (bpf_map__name(map) && (strcmp(name, bpf_map__name(map)) == 0))
1705
/*
 * Ensure every map called @name across all loaded BPF objects has an
 * output evsel configured.
 *
 * Pass 1 scans the maps: if any map of that name still lacks private
 * data it needs initialization; the first map that already has priv
 * becomes the template. If no template exists, a fresh bpf-output
 * event is parsed into @evlist and used for the uninitialized maps.
 * Pass 2 fills each still-unconfigured map, either by cloning the
 * template priv or by queueing a SET_EVSEL op for the new evsel.
 *
 * Returns the newly created evsel, NULL when nothing needed doing,
 * or an ERR_PTR on failure.
 */
struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
{
	struct bpf_map_priv *tmpl_priv = NULL;
	struct bpf_perf_object *perf_obj, *tmp;
	struct evsel *evsel = NULL;
	struct bpf_map *map;
	int err;
	bool need_init = false;

	/* pass 1: find an existing template priv and any map needing init */
	bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
		struct bpf_map_priv *priv = map_priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);

		/*
		 * No need to check map type: type should have been
		 * verified by kernel.
		 */
		if (!need_init && !priv)
			need_init = !priv;
		if (!tmpl_priv && priv)
			tmpl_priv = priv;
	}

	if (!need_init)
		return NULL;

	/* no template: create a dedicated bpf-output event to bind to */
	if (!tmpl_priv) {
		char *event_definition = NULL;

		if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
			return ERR_PTR(-ENOMEM);

		err = parse_events(evlist, event_definition, NULL);
		free(event_definition);

		if (err) {
			pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
			return ERR_PTR(-err);
		}

		evsel = evlist__last(evlist);
	}

	/* pass 2: configure every map of that name that still lacks priv */
	bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
		struct bpf_map_priv *priv = map_priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
		if (priv)
			continue;

		if (tmpl_priv) {
			priv = bpf_map_priv__clone(tmpl_priv);
			if (!priv)
				return ERR_PTR(-ENOMEM);

			err = map_set_priv(map, priv);
			if (err) {
				bpf_map_priv__clear(map, priv);
				return ERR_PTR(err);
			}
		} else if (evsel) {
			struct bpf_map_op *op;

			op = bpf_map__add_newop(map, NULL);
			if (IS_ERR(op))
				return ERR_CAST(op);
			op->op_type = BPF_MAP_OP_SET_EVSEL;
			op->v.evsel = evsel;
		}
	}

	return evsel;
}
1782
/* Wire the conventional "__bpf_stdout__" output map; 0 or negative error. */
int bpf__setup_stdout(struct evlist *evlist)
{
	return PTR_ERR_OR_ZERO(bpf__setup_output_event(evlist, "__bpf_stdout__"));
}
1788
/* Translate a BPF_LOADER_ERRNO__* value into an index into the table below. */
#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
1792
/* Human-readable message for each BPF loader error code, indexed by ERRNO_OFFSET(). */
static const char *bpf_loader_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(CONFIG)]	= "Invalid config string",
	[ERRCODE_OFFSET(GROUP)]		= "Invalid group name",
	[ERRCODE_OFFSET(EVENTNAME)]	= "No event name found in config string",
	[ERRCODE_OFFSET(INTERNAL)]	= "BPF loader internal error",
	[ERRCODE_OFFSET(COMPILE)]	= "Error when compiling BPF scriptlet",
	[ERRCODE_OFFSET(PROGCONF_TERM)]	= "Invalid program config term in config string",
	[ERRCODE_OFFSET(PROLOGUE)]	= "Failed to generate prologue",
	[ERRCODE_OFFSET(PROLOGUE2BIG)]	= "Prologue too big for program",
	[ERRCODE_OFFSET(PROLOGUEOOB)]	= "Offset out of bound for prologue",
	[ERRCODE_OFFSET(OBJCONF_OPT)]	= "Invalid object config option",
	[ERRCODE_OFFSET(OBJCONF_CONF)]	= "Config value not set (missing '=')",
	[ERRCODE_OFFSET(OBJCONF_MAP_OPT)]	= "Invalid object map config option",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]	= "Target map doesn't exist",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]	= "Incorrect value type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]	= "Incorrect map type",
	[ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]	= "Incorrect map key size",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)]	= "Incorrect map value size",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]	= "Event not found for map setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]	= "Invalid map size for event setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]	= "Event dimension too large",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]	= "Doesn't support inherit event",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]	= "Wrong event type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]	= "Index too large",
};
1818
1819 static int
1820 bpf_loader_strerror(int err, char *buf, size_t size)
1821 {
1822         char sbuf[STRERR_BUFSIZE];
1823         const char *msg;
1824
1825         if (!buf || !size)
1826                 return -1;
1827
1828         err = err > 0 ? err : -err;
1829
1830         if (err >= __LIBBPF_ERRNO__START)
1831                 return libbpf_strerror(err, buf, size);
1832
1833         if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
1834                 msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
1835                 snprintf(buf, size, "%s", msg);
1836                 buf[size - 1] = '\0';
1837                 return 0;
1838         }
1839
1840         if (err >= __BPF_LOADER_ERRNO__END)
1841                 snprintf(buf, size, "Unknown bpf loader error %d", err);
1842         else
1843                 snprintf(buf, size, "%s",
1844                          str_error_r(err, sbuf, sizeof(sbuf)));
1845
1846         buf[size - 1] = '\0';
1847         return -1;
1848 }
1849
/*
 * Helper macros for building the bpf__strerror_*() functions below.
 *
 * bpf__strerror_head() declares 'sbuf'/'emsg', resolves a generic
 * message for 'err' via bpf_loader_strerror(), and opens a switch on
 * the (made positive) error number with a default case that prints
 * that generic message.  bpf__strerror_entry() adds one case with a
 * custom format (it may reference 'emsg').  bpf__strerror_end() closes
 * the switch and NUL-terminates the buffer.  The three macros open and
 * close scopes across each other, so they must always be used together
 * and in this order.  Comments cannot go inside the definitions: a //
 * comment would swallow the line-continuation backslash.
 */
#define bpf__strerror_head(err, buf, size) \
	char sbuf[STRERR_BUFSIZE], *emsg;\
	if (!size)\
		return 0;\
	if (err < 0)\
		err = -err;\
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
	emsg = sbuf;\
	switch (err) {\
	default:\
		scnprintf(buf, size, "%s", emsg);\
		break;

/* One switch case printing a custom message; usable only between head/end. */
#define bpf__strerror_entry(val, fmt...)\
	case val: {\
		scnprintf(buf, size, fmt);\
		break;\
	}

/* Close the switch opened by bpf__strerror_head() and terminate 'buf'. */
#define bpf__strerror_end(buf, size)\
	}\
	buf[size - 1] = '\0';
1872
/*
 * Fill 'buf' with "Failed to load <file>[ from source]: <reason>".
 *
 * The prefix is written first; if it already fills (or overflows) the
 * buffer, stop there.  Otherwise the error reason is appended via
 * bpf_loader_strerror(), whose return value is passed through.
 */
int bpf__strerror_prepare_load(const char *filename, bool source,
			       int err, char *buf, size_t size)
{
	size_t prefix_len;
	int ret;

	prefix_len = snprintf(buf, size, "Failed to load %s%s: ",
			      filename, source ? " from source" : "");
	/* Prefix truncated: nothing left for the reason text. */
	if (prefix_len >= size) {
		buf[size - 1] = '\0';
		return 0;
	}

	ret = bpf_loader_strerror(err, buf + prefix_len, size - prefix_len);
	buf[size - 1] = '\0';
	return ret;
}
1892
1893 int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
1894                         int err, char *buf, size_t size)
1895 {
1896         bpf__strerror_head(err, buf, size);
1897         case BPF_LOADER_ERRNO__PROGCONF_TERM: {
1898                 scnprintf(buf, size, "%s (add -v to see detail)", emsg);
1899                 break;
1900         }
1901         bpf__strerror_entry(EEXIST, "Probe point exist. Try 'perf probe -d \"*\"' and set 'force=yes'");
1902         bpf__strerror_entry(EACCES, "You need to be root");
1903         bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
1904         bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
1905         bpf__strerror_end(buf, size);
1906         return 0;
1907 }
1908
1909 int bpf__strerror_load(struct bpf_object *obj,
1910                        int err, char *buf, size_t size)
1911 {
1912         bpf__strerror_head(err, buf, size);
1913         case LIBBPF_ERRNO__KVER: {
1914                 unsigned int obj_kver = bpf_object__kversion(obj);
1915                 unsigned int real_kver;
1916
1917                 if (fetch_kernel_version(&real_kver, NULL, 0)) {
1918                         scnprintf(buf, size, "Unable to fetch kernel version");
1919                         break;
1920                 }
1921
1922                 if (obj_kver != real_kver) {
1923                         scnprintf(buf, size,
1924                                   "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
1925                                   KVER_PARAM(obj_kver),
1926                                   KVER_PARAM(real_kver));
1927                         break;
1928                 }
1929
1930                 scnprintf(buf, size, "Failed to load program for unknown reason");
1931                 break;
1932         }
1933         bpf__strerror_end(buf, size);
1934         return 0;
1935 }
1936
1937 int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
1938                              struct parse_events_term *term __maybe_unused,
1939                              struct evlist *evlist __maybe_unused,
1940                              int *error_pos __maybe_unused, int err,
1941                              char *buf, size_t size)
1942 {
1943         bpf__strerror_head(err, buf, size);
1944         bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
1945                             "Can't use this config term with this map type");
1946         bpf__strerror_end(buf, size);
1947         return 0;
1948 }
1949
1950 int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
1951 {
1952         bpf__strerror_head(err, buf, size);
1953         bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
1954                             "Cannot set event to BPF map in multi-thread tracing");
1955         bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
1956                             "%s (Hint: use -i to turn off inherit)", emsg);
1957         bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
1958                             "Can only put raw, hardware and BPF output event into a BPF map");
1959         bpf__strerror_end(buf, size);
1960         return 0;
1961 }
1962
1963 int bpf__strerror_setup_output_event(struct evlist *evlist __maybe_unused,
1964                                      int err, char *buf, size_t size)
1965 {
1966         bpf__strerror_head(err, buf, size);
1967         bpf__strerror_end(buf, size);
1968         return 0;
1969 }