// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "util/header.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "cgroup.h"
#include <errno.h>
#include <sys/utsname.h>
#include <bpf/libbpf.h>
#include "bpf-event.h"
#include <stdlib.h>
#include <string.h>

struct perf_env perf_env;
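
/*
 * Insert BPF program info into the env, taking the writer lock. Thin
 * wrapper for callers that do not already hold env->bpf_progs.lock.
 */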
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node)
{
	down_write(&env->bpf_progs.lock);
	__perf_env__insert_bpf_prog_info(env, info_node);
	up_write(&env->bpf_progs.lock);
}
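
/*
 * Variant without locking: the caller must hold env->bpf_progs.lock
 * for writing. Nodes are keyed by BPF program id; a duplicate id is
 * reported with pr_debug() and the new node is not inserted.
 */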
void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
{
	__u32 prog_id = info_node->info_linear->info.id;
	struct bpf_prog_info_node *node;
	struct rb_node *parent = NULL;
	struct rb_node **p;

	p = &env->bpf_progs.infos.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id) {
			p = &(*p)->rb_left;
		} else if (prog_id > node->info_linear->info.id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated bpf prog info %u\n", prog_id);
			return;
		}
	}

	rb_link_node(&info_node->rb_node, parent, p);
	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
	env->bpf_progs.infos_cnt++;
}
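
/*
 * Look up BPF program info by program id under the reader lock.
 * Returns NULL if the id is not in the tree.
 */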
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id)
{
	struct bpf_prog_info_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.infos.rb_node;

	while (n) {
		node = rb_entry(n, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id)
			n = n->rb_left;
		else if (prog_id > node->info_linear->info.id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}
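
/*
 * Insert a BTF node under the writer lock. Returns true if the node
 * was inserted, false if its id was already present.
 */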
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	bool ret;

	down_write(&env->bpf_progs.lock);
	ret = __perf_env__insert_btf(env, btf_node);
	up_write(&env->bpf_progs.lock);
	return ret;
}
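
/* Caller must hold env->bpf_progs.lock for writing. */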
bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	struct rb_node *parent = NULL;
	__u32 btf_id = btf_node->id;
	struct btf_node *node;
	struct rb_node **p;

	p = &env->bpf_progs.btfs.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct btf_node, rb_node);
		if (btf_id < node->id) {
			p = &(*p)->rb_left;
		} else if (btf_id > node->id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated btf %u\n", btf_id);
			return false;
		}
	}

	rb_link_node(&btf_node->rb_node, parent, p);
	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
	env->bpf_progs.btfs_cnt++;
	return true;
}
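
/* Look up a BTF node by id, taking the reader lock around the search. */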
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *res;

	down_read(&env->bpf_progs.lock);
	res = __perf_env__find_btf(env, btf_id);
	up_read(&env->bpf_progs.lock);
	return res;
}
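
/* Caller must hold env->bpf_progs.lock (read or write). */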
struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *node = NULL;
	struct rb_node *n;

	n = env->bpf_progs.btfs.rb_node;

	while (n) {
		node = rb_entry(n, struct btf_node, rb_node);
		if (btf_id < node->id)
			n = n->rb_left;
		else if (btf_id > node->id)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

/* purge data in the bpf_progs infos and btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
	struct rb_root *root;
	struct rb_node *next;

	down_write(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node->info_linear);
		free(node);
	}

	env->bpf_progs.infos_cnt = 0;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.btfs_cnt = 0;

	up_write(&env->bpf_progs.lock);
}
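
/*
 * Release everything the env owns: the BPF/BTF trees, the cgroup tree
 * and every string or array populated from the perf.data header.
 */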
void perf_env__exit(struct perf_env *env)
{
	int i;

	perf_env__purge_bpf(env);
	perf_env__purge_cgroups(env);
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);
	zfree(&env->cmdline);
	zfree(&env->cmdline_argv);
	zfree(&env->sibling_dies);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);
	zfree(&env->cpu_pmu_caps);
	zfree(&env->numa_map);

	for (i = 0; i < env->nr_numa_nodes; i++)
		perf_cpu_map__put(env->numa_nodes[i].map);
	zfree(&env->numa_nodes);

	for (i = 0; i < env->caches_cnt; i++)
		cpu_cache_level__free(&env->caches[i]);
	zfree(&env->caches);

	for (i = 0; i < env->nr_memory_nodes; i++)
		zfree(&env->memory_nodes[i].set);
	zfree(&env->memory_nodes);
}
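
/*
 * Initialize the BPF rb-trees and their lock; must run before any of
 * the insert/find helpers above. Typical (illustrative) sequence:
 *
 *	perf_env__init(&env);
 *	...insert bpf prog info / btf nodes, read header features...
 *	perf_env__exit(&env);
 */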
void perf_env__init(struct perf_env *env)
{
	env->bpf_progs.infos = RB_ROOT;
	env->bpf_progs.btfs = RB_ROOT;
	init_rwsem(&env->bpf_progs.lock);
}
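
/*
 * Stash a copy of the tool's argv in the env so the command line can
 * later be written out as a perf.data header feature.
 */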
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
	int i;

	/* do not include NULL termination */
	env->cmdline_argv = calloc(argc, sizeof(char *));
	if (env->cmdline_argv == NULL)
		goto out_enomem;

	/*
	 * Must copy the argv pointers because the array gets moved around
	 * during option parsing:
	 */
	for (i = 0; i < argc ; i++) {
		env->cmdline_argv[i] = argv[i];
		if (env->cmdline_argv[i] == NULL)
			goto out_free;
	}

	env->nr_cmdline = argc;

	return 0;
out_free:
	zfree(&env->cmdline_argv);
out_enomem:
	return -ENOMEM;
}
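
/*
 * Fill env->cpu with the core/socket/die id of every available CPU.
 * A no-op if the topology map has already been read.
 */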
int perf_env__read_cpu_topology_map(struct perf_env *env)
{
	int cpu, nr_cpus;

	if (env->cpu != NULL)
		return 0;

	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	nr_cpus = env->nr_cpus_avail;
	if (nr_cpus == -1)
		return -EINVAL;

	env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
	if (env->cpu == NULL)
		return -ENOMEM;

	for (cpu = 0; cpu < nr_cpus; ++cpu) {
		env->cpu[cpu].core_id	= cpu_map__get_core_id(cpu);
		env->cpu[cpu].socket_id	= cpu_map__get_socket_id(cpu);
		env->cpu[cpu].die_id	= cpu_map__get_die_id(cpu);
	}

	env->nr_cpus_avail = nr_cpus;
	return 0;
}
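
/* Read the local CPU identifier string via the arch-specific get_cpuid(). */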
int perf_env__read_cpuid(struct perf_env *env)
{
	char cpuid[128];
	int err = get_cpuid(cpuid, sizeof(cpuid));

	if (err)
		return err;

	free(env->cpuid);
	env->cpuid = strdup(cpuid);
	if (env->cpuid == NULL)
		return -ENOMEM;
	return 0;
}

static int perf_env__read_arch(struct perf_env *env)
{
	struct utsname uts;

	if (env->arch)
		return 0;

	if (!uname(&uts))
		env->arch = strdup(uts.machine);

	return env->arch ? 0 : -ENOMEM;
}

static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	return env->nr_cpus_avail ? 0 : -ENOENT;
}

const char *perf_env__raw_arch(struct perf_env *env)
{
	return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}

int perf_env__nr_cpus_avail(struct perf_env *env)
{
	return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}
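
/* Free the strings describing one CPU cache level. */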
void cpu_cache_level__free(struct cpu_cache_level *cache)
{
	zfree(&cache->type);
	zfree(&cache->map);
	zfree(&cache->size);
}

/*
 * Return the architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
		return "sparc";
	if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "s390", 4))
		return "s390";
	if (!strncmp(arch, "parisc", 6))
		return "parisc";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	if (!strncmp(arch, "mips", 4))
		return "mips";
	if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
		return "sh";

	return arch;
}

const char *perf_env__arch(struct perf_env *env)
{
	char *arch_name;

	if (!env || !env->arch) { /* Assume local operation */
		static struct utsname uts = { .machine[0] = '\0', };

		if (uts.machine[0] == '\0' && uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	} else
		arch_name = env->arch;

	return normalize_arch(arch_name);
}
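
/*
 * Map a CPU number to its NUMA node. The cpu->node table is built
 * lazily on first call from env->numa_nodes; CPUs that belong to no
 * node, and out-of-range CPU numbers, resolve to -1.
 */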
int perf_env__numa_node(struct perf_env *env, int cpu)
{
	if (!env->nr_numa_map) {
		struct numa_node *nn;
		int i, nr = 0;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			nn = &env->numa_nodes[i];
			nr = max(nr, perf_cpu_map__max(nn->map));
		}

		nr++;

		/*
		 * We initialize the numa_map array to prepare
		 * it for missing CPUs, which return node -1.
		 */
		env->numa_map = malloc(nr * sizeof(int));
		if (!env->numa_map)
			return -1;

		for (i = 0; i < nr; i++)
			env->numa_map[i] = -1;

		env->nr_numa_map = nr;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			int tmp, j;

			nn = &env->numa_nodes[i];
			perf_cpu_map__for_each_cpu(j, tmp, nn->map)
				env->numa_map[j] = i;
		}
	}

	return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
}