// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "util/header.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "cgroup.h"
#include <errno.h>
#include <sys/utsname.h>
#include <stdlib.h>
#include <string.h>
#include "pmu.h"
#include "strbuf.h"

struct perf_env perf_env;

#ifdef HAVE_LIBBPF_SUPPORT
#include "bpf-event.h"
#include "bpf-utils.h"
#include <bpf/libbpf.h>

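/*
 * Insert @info_node into the rbtree of BPF program infos, keyed by
 * program id. Duplicate ids are dropped with a debug message. The
 * bpf_progs lock is held for writing around the insertion.
 */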
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node)
{
	__u32 prog_id = info_node->info_linear->info.id;
	struct bpf_prog_info_node *node;
	struct rb_node *parent = NULL;
	struct rb_node **p;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.infos.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id) {
			p = &(*p)->rb_left;
		} else if (prog_id > node->info_linear->info.id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated bpf prog info %u\n", prog_id);
			goto out;
		}
	}

	rb_link_node(&info_node->rb_node, parent, p);
	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
	env->bpf_progs.infos_cnt++;
out:
	up_write(&env->bpf_progs.lock);
}

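/*
 * Look up a BPF program info node by program id under the read side of
 * the bpf_progs lock; returns NULL if no matching id is found.
 */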
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id)
{
	struct bpf_prog_info_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.infos.rb_node;

	while (n) {
		node = rb_entry(n, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id)
			n = n->rb_left;
		else if (prog_id > node->info_linear->info.id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}

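/*
 * Insert @btf_node into the rbtree of BTF objects, keyed by BTF id.
 * Returns false if an entry with the same id already exists.
 */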
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	struct rb_node *parent = NULL;
	__u32 btf_id = btf_node->id;
	struct btf_node *node;
	struct rb_node **p;
	bool ret = true;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.btfs.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct btf_node, rb_node);
		if (btf_id < node->id) {
			p = &(*p)->rb_left;
		} else if (btf_id > node->id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated btf %u\n", btf_id);
			ret = false;
			goto out;
		}
	}

	rb_link_node(&btf_node->rb_node, parent, p);
	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
	env->bpf_progs.btfs_cnt++;
out:
	up_write(&env->bpf_progs.lock);
	return ret;
}

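/*
 * Look up a BTF node by id under the read side of the bpf_progs lock;
 * returns NULL if no matching id is found.
 */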
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.btfs.rb_node;

	while (n) {
		node = rb_entry(n, struct btf_node, rb_node);
		if (btf_id < node->id)
			n = n->rb_left;
		else if (btf_id > node->id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;
out:
	up_read(&env->bpf_progs.lock);
	return node;
}

/* purge data in bpf_progs.infos and bpf_progs.btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
	struct rb_root *root;
	struct rb_node *next;

	down_write(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node->info_linear);
		free(node);
	}

	env->bpf_progs.infos_cnt = 0;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.btfs_cnt = 0;

	up_write(&env->bpf_progs.lock);
}
#else // HAVE_LIBBPF_SUPPORT
static void perf_env__purge_bpf(struct perf_env *env __maybe_unused)
{
}
#endif // HAVE_LIBBPF_SUPPORT

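/*
 * Release everything owned by the env: the BPF program info and BTF
 * trees, cgroup entries, and all dynamically allocated strings and
 * arrays.
 */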
void perf_env__exit(struct perf_env *env)
{
	int i;

	perf_env__purge_bpf(env);
	perf_env__purge_cgroups(env);
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);
	zfree(&env->cmdline);
	zfree(&env->cmdline_argv);
	zfree(&env->sibling_dies);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);
	zfree(&env->cpu_pmu_caps);
	zfree(&env->numa_map);

	for (i = 0; i < env->nr_numa_nodes; i++)
		perf_cpu_map__put(env->numa_nodes[i].map);
	zfree(&env->numa_nodes);

	for (i = 0; i < env->caches_cnt; i++)
		cpu_cache_level__free(&env->caches[i]);
	zfree(&env->caches);

	for (i = 0; i < env->nr_memory_nodes; i++)
		zfree(&env->memory_nodes[i].set);
	zfree(&env->memory_nodes);

	for (i = 0; i < env->nr_hybrid_nodes; i++) {
		zfree(&env->hybrid_nodes[i].pmu_name);
		zfree(&env->hybrid_nodes[i].cpus);
	}
	zfree(&env->hybrid_nodes);

	for (i = 0; i < env->nr_hybrid_cpc_nodes; i++) {
		zfree(&env->hybrid_cpc_nodes[i].cpu_pmu_caps);
		zfree(&env->hybrid_cpc_nodes[i].pmu_name);
	}
	zfree(&env->hybrid_cpc_nodes);
}

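/*
 * Prepare an env for use: empty BPF/BTF trees (when libbpf support is
 * compiled in) and an undetermined (-1) kernel word size.
 */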
void perf_env__init(struct perf_env *env)
{
#ifdef HAVE_LIBBPF_SUPPORT
	env->bpf_progs.infos = RB_ROOT;
	env->bpf_progs.btfs = RB_ROOT;
	init_rwsem(&env->bpf_progs.lock);
#endif
	env->kernel_is_64_bit = -1;
}

static void perf_env__init_kernel_mode(struct perf_env *env)
{
	const char *arch = perf_env__raw_arch(env);

	if (!strncmp(arch, "x86_64", 6) || !strncmp(arch, "aarch64", 7) ||
	    !strncmp(arch, "arm64", 5) || !strncmp(arch, "mips64", 6) ||
	    !strncmp(arch, "parisc64", 8) || !strncmp(arch, "riscv64", 7) ||
	    !strncmp(arch, "s390x", 5) || !strncmp(arch, "sparc64", 7))
		env->kernel_is_64_bit = 1;
	else
		env->kernel_is_64_bit = 0;
}

int perf_env__kernel_is_64_bit(struct perf_env *env)
{
	if (env->kernel_is_64_bit == -1)
		perf_env__init_kernel_mode(env);

	return env->kernel_is_64_bit;
}

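/*
 * Record the perf command line in the env. The argv array is stored
 * without its NULL terminator; nr_cmdline holds the element count.
 */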
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
	int i;

	/* do not include NULL termination */
	env->cmdline_argv = calloc(argc, sizeof(char *));
	if (env->cmdline_argv == NULL)
		goto out_enomem;

	/*
	 * Must copy argv contents because it gets moved around during option
	 * parsing:
	 */
	for (i = 0; i < argc ; i++) {
		env->cmdline_argv[i] = argv[i];
		if (env->cmdline_argv[i] == NULL)
			goto out_free;
	}

	env->nr_cmdline = argc;

	return 0;
out_free:
	zfree(&env->cmdline_argv);
out_enomem:
	return -ENOMEM;
}

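/*
 * Build the per-CPU core/socket/die id map from sysfs topology files.
 * A no-op when env->cpu is already populated.
 */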
int perf_env__read_cpu_topology_map(struct perf_env *env)
{
	int idx, nr_cpus;

	if (env->cpu != NULL)
		return 0;

	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu().cpu;

	nr_cpus = env->nr_cpus_avail;
	if (nr_cpus == -1)
		return -EINVAL;

	env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
	if (env->cpu == NULL)
		return -ENOMEM;

	for (idx = 0; idx < nr_cpus; ++idx) {
		struct perf_cpu cpu = { .cpu = idx };

		env->cpu[idx].core_id	= cpu__get_core_id(cpu);
		env->cpu[idx].socket_id	= cpu__get_socket_id(cpu);
		env->cpu[idx].die_id	= cpu__get_die_id(cpu);
	}

	env->nr_cpus_avail = nr_cpus;
	return 0;
}

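/*
 * Build the "type:name" PMU mapping list as a sequence of
 * NUL-terminated strings, one per PMU that has a name.
 */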
int perf_env__read_pmu_mappings(struct perf_env *env)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	struct strbuf sb;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;
	}
	if (!pmu_num) {
		pr_debug("pmu mappings not available\n");
		return -ENOENT;
	}
	env->nr_pmu_mappings = pmu_num;

	if (strbuf_init(&sb, 128 * pmu_num) < 0)
		return -ENOMEM;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		if (strbuf_addf(&sb, "%u:%s", pmu->type, pmu->name) < 0)
			goto error;
		/* include a NULL character at the end */
		if (strbuf_add(&sb, "", 1) < 0)
			goto error;
	}

	env->pmu_mappings = strbuf_detach(&sb, NULL);

	return 0;

error:
	strbuf_release(&sb);
	return -1;
}

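/* Refresh env->cpuid with the identifier reported by get_cpuid(). */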
int perf_env__read_cpuid(struct perf_env *env)
{
	char cpuid[128];
	int err = get_cpuid(cpuid, sizeof(cpuid));

	if (err)
		return err;

	free(env->cpuid);
	env->cpuid = strdup(cpuid);
	if (env->cpuid == NULL)
		return ENOMEM;
	return 0;
}

static int perf_env__read_arch(struct perf_env *env)
{
	struct utsname uts;

	if (env->arch)
		return 0;

	if (!uname(&uts))
		env->arch = strdup(uts.machine);

	return env->arch ? 0 : -ENOMEM;
}

static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu().cpu;

	return env->nr_cpus_avail ? 0 : -ENOENT;
}

const char *perf_env__raw_arch(struct perf_env *env)
{
	return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}

int perf_env__nr_cpus_avail(struct perf_env *env)
{
	return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}

void cpu_cache_level__free(struct cpu_cache_level *cache)
{
	zfree(&cache->type);
	zfree(&cache->map);
	zfree(&cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
		return "sparc";
	if (!strncmp(arch, "aarch64", 7) || !strncmp(arch, "arm64", 5))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "s390", 4))
		return "s390";
	if (!strncmp(arch, "parisc", 6))
		return "parisc";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	if (!strncmp(arch, "mips", 4))
		return "mips";
	if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
		return "sh";

	return arch;
}

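/*
 * Return the normalized architecture name, falling back to uname(2)
 * when the env carries no arch (i.e. a local session).
 */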
const char *perf_env__arch(struct perf_env *env)
{
	char *arch_name;

	if (!env || !env->arch) { /* Assume local operation */
		static struct utsname uts = { .machine[0] = '\0', };

		if (uts.machine[0] == '\0' && uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	} else
		arch_name = env->arch;

	return normalize_arch(arch_name);
}

const char *perf_env__cpuid(struct perf_env *env)
{
	int status;

	if (!env || !env->cpuid) { /* Assume local operation */
		status = perf_env__read_cpuid(env);
		if (status)
			return NULL;
	}

	return env->cpuid;
}

int perf_env__nr_pmu_mappings(struct perf_env *env)
{
	int status;

	if (!env || !env->nr_pmu_mappings) { /* Assume local operation */
		status = perf_env__read_pmu_mappings(env);
		if (status)
			return 0;
	}

	return env->nr_pmu_mappings;
}

const char *perf_env__pmu_mappings(struct perf_env *env)
{
	int status;

	if (!env || !env->pmu_mappings) { /* Assume local operation */
		status = perf_env__read_pmu_mappings(env);
		if (status)
			return NULL;
	}

	return env->pmu_mappings;
}

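/*
 * Map a CPU to its NUMA node. The cpu -> node array is built lazily on
 * first use; CPUs absent from every node map to -1.
 */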
int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu)
{
	if (!env->nr_numa_map) {
		struct numa_node *nn;
		int i, nr = 0;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			nn = &env->numa_nodes[i];
			nr = max(nr, perf_cpu_map__max(nn->map).cpu);
		}

		nr++;

		/*
		 * We initialize the numa_map array to prepare
		 * it for missing cpus, which return node -1
		 */
		env->numa_map = malloc(nr * sizeof(int));
		if (!env->numa_map)
			return -1;

		for (i = 0; i < nr; i++)
			env->numa_map[i] = -1;

		env->nr_numa_map = nr;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			struct perf_cpu tmp;
			int j;

			nn = &env->numa_nodes[i];
			perf_cpu_map__for_each_cpu(tmp, j, nn->map)
				env->numa_map[tmp.cpu] = i;
		}
	}

	return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? env->numa_map[cpu.cpu] : -1;
}