// SPDX-License-Identifier: GPL-2.0
#include "util/bpf_counter.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/off_cpu.h"
#include "util/perf-hooks.h"
#include "util/record.h"
#include "util/session.h"
#include "util/target.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/cgroup.h"
#include <bpf/bpf.h>
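
/* generated from util/bpf_skel/off_cpu.bpf.c at build time */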
#include "bpf_skel/off_cpu.skel.h"

/* we don't need an actual timestamp, just want to put the samples last */
#define OFF_CPU_TIMESTAMP	(~0ull << 32)
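
/*
 * Skeleton handle for the whole record session: opened in
 * off_cpu_prepare() and destroyed in off_cpu_finish().  off_cpu_key
 * mirrors the key of the BPF off_cpu map; off_cpu_data is scratch
 * space for building a PERF_RECORD_SAMPLE in off_cpu_write().
 */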
static struct off_cpu_bpf *skel;

struct off_cpu_key {
	u32 pid;
	u32 tgid;
	u32 stack_id;
	u32 state;
	u64 cgroup_id;
};

union off_cpu_data {
	struct perf_event_header hdr;
	u64 array[1024 / sizeof(u64)];
};
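
/*
 * Add a software BPF-output event named OFFCPU_EVENT to the evlist.
 * It doesn't count anything by itself; it only provides the event the
 * samples synthesized in off_cpu_write() will be attributed to.
 */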
static int off_cpu_config(struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_BPF_OUTPUT,
		.size	= sizeof(attr),	/* to capture ABI version */
	};
	char *evname = strdup(OFFCPU_EVENT);

	if (evname == NULL)
		return -ENOMEM;

	evsel = evsel__new(&attr);
	if (!evsel) {
		free(evname);
		return -ENOMEM;
	}

	evsel->core.attr.freq = 1;
	evsel->core.attr.sample_period = 1;
	/* off-cpu analysis depends on stack trace */
	evsel->core.attr.sample_type = PERF_SAMPLE_CALLCHAIN;

	evlist__add(evlist, evsel);

	free(evsel->name);
	evsel->name = evname;

	return 0;
}
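
/*
 * "record_start" hook: the workload (if any) is forked by perf record
 * after off_cpu_prepare(), so its pid can only be added to the task
 * filter here, right before the BPF program is enabled.
 */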
static void off_cpu_start(void *arg)
{
	struct evlist *evlist = arg;

	/* update task filter for the given workload */
	if (!skel->bss->has_cpu && !skel->bss->has_task &&
	    perf_thread_map__pid(evlist->core.threads, 0) != -1) {
		int fd;
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		pid = perf_thread_map__pid(evlist->core.threads, 0);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	skel->bss->enabled = 1;
}
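
/* "record_end" hook: stop collecting and tear down the BPF skeleton */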
static void off_cpu_finish(void *arg __maybe_unused)
{
	skel->bss->enabled = 0;
	off_cpu_bpf__destroy(skel);
}

/* v5.18 kernel added a prev_state arg, so check the function signature */
static void check_sched_switch_args(void)
{
	const struct btf *btf = bpf_object__btf(skel->obj);
	const struct btf_type *t1, *t2, *t3;
	u32 type_id;

	type_id = btf__find_by_name_kind(btf, "bpf_trace_sched_switch",
					 BTF_KIND_TYPEDEF);
	if ((s32)type_id < 0)
		return;

	t1 = btf__type_by_id(btf, type_id);
	if (t1 == NULL)
		return;

	t2 = btf__type_by_id(btf, t1->type);
	if (t2 == NULL || !btf_is_ptr(t2))
		return;

	t3 = btf__type_by_id(btf, t2->type);
	if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) {
		/* new format: pass prev_state as 4th arg */
		skel->rodata->has_prev_state = true;
	}
}
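
/*
 * Set up everything for off-cpu profiling: open the skeleton, size the
 * filter maps before loading, populate them with the requested
 * cpus/tasks/cgroups, then attach and register the record hooks above.
 */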
int off_cpu_prepare(struct evlist *evlist, struct target *target,
		    struct record_opts *opts)
{
	int err, fd, i;
	int ncpus = 1, ntasks = 1, ncgrps = 1;

	if (off_cpu_config(evlist) < 0) {
		pr_err("Failed to config off-cpu BPF event\n");
		return -1;
	}

	skel = off_cpu_bpf__open();
	if (!skel) {
		pr_err("Failed to open off-cpu BPF skeleton\n");
		return -1;
	}

	/* don't need to set cpu filter for system-wide mode */
	if (target->cpu_list) {
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	}

	if (target__has_task(target)) {
		ntasks = perf_thread_map__nr(evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	}

	if (evlist__first(evlist)->cgrp) {
		ncgrps = evlist->core.nr_entries - 1; /* excluding a dummy */
		bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

	if (opts->record_cgroup) {
		skel->rodata->needs_cgroup = true;

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

	set_max_rlimit();
	check_sched_switch_args();

	err = off_cpu_bpf__load(skel);
	if (err) {
		pr_err("Failed to load off-cpu skeleton\n");
		goto out;
	}

	if (target->cpu_list) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	if (evlist__first(evlist)->cgrp) {
		struct evsel *evsel;
		u8 val = 1;

		skel->bss->has_cgroup = 1;
		fd = bpf_map__fd(skel->maps.cgroup_filter);

		evlist__for_each_entry(evlist, evsel) {
			struct cgroup *cgrp = evsel->cgrp;

			if (cgrp == NULL)
				continue;

			if (!cgrp->id && read_cgroup_id(cgrp) < 0) {
				pr_err("Failed to read cgroup id of %s\n",
				       cgrp->name);
				goto out;
			}

			bpf_map_update_elem(fd, &cgrp->id, &val, BPF_ANY);
		}
	}

	err = off_cpu_bpf__attach(skel);
	if (err) {
		pr_err("Failed to attach off-cpu BPF skeleton\n");
		goto out;
	}

	if (perf_hooks__set_hook("record_start", off_cpu_start, evlist) ||
	    perf_hooks__set_hook("record_end", off_cpu_finish, evlist)) {
		pr_err("Failed to set off-cpu record hooks\n");
		goto out;
	}

	return 0;

out:
	off_cpu_bpf__destroy(skel);
	return -1;
}
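
/*
 * Called at the end of the record session: drain the BPF off_cpu map
 * and append one synthesized PERF_RECORD_SAMPLE per entry to the data
 * file.  Returns the number of bytes written, or -1 on error.
 */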
int off_cpu_write(struct perf_session *session)
{
	int bytes = 0, size;
	int fd, stack;
	u64 sample_type, val, sid = 0;
	struct evsel *evsel;
	struct perf_data_file *file = &session->data->file;
	struct off_cpu_key prev, key;
	union off_cpu_data data = {
		.hdr = {
			.type = PERF_RECORD_SAMPLE,
			.misc = PERF_RECORD_MISC_USER,
		},
	};
	u64 tstamp = OFF_CPU_TIMESTAMP;

	skel->bss->enabled = 0;

	evsel = evlist__find_evsel_by_str(session->evlist, OFFCPU_EVENT);
	if (evsel == NULL) {
		pr_err("%s evsel not found\n", OFFCPU_EVENT);
		return 0;
	}

	sample_type = evsel->core.attr.sample_type;

	if (sample_type & ~OFFCPU_SAMPLE_TYPES) {
		pr_err("unsupported sample type: %llx\n",
		       (unsigned long long)sample_type);
		return -1;
	}

	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) {
		if (evsel->core.id)
			sid = evsel->core.id[0];
	}

	fd = bpf_map__fd(skel->maps.off_cpu);
	stack = bpf_map__fd(skel->maps.stacks);
	memset(&prev, 0, sizeof(prev));
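
	/*
	 * Iterate the BPF hash map via bpf_map_get_next_key().  Each sample
	 * field must be written in the order the perf_event ABI defines for
	 * the sample_type bits.
	 */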
	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		int n = 1; /* start from perf_event_header */
		int ip_pos = -1;

		bpf_map_lookup_elem(fd, &key, &val);

		if (sample_type & PERF_SAMPLE_IDENTIFIER)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_IP) {
			ip_pos = n;
			data.array[n++] = 0; /* will be updated */
		}
		if (sample_type & PERF_SAMPLE_TID)
			data.array[n++] = (u64)key.pid << 32 | key.tgid;
		if (sample_type & PERF_SAMPLE_TIME)
			data.array[n++] = tstamp;
		if (sample_type & PERF_SAMPLE_ID)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_CPU)
			data.array[n++] = 0;
		if (sample_type & PERF_SAMPLE_PERIOD)
			data.array[n++] = val;
		if (sample_type & PERF_SAMPLE_CALLCHAIN) {
			int len = 0;

			/* data.array[n] is callchain->nr (updated later) */
			data.array[n + 1] = PERF_CONTEXT_USER;
			data.array[n + 2] = 0;

			bpf_map_lookup_elem(stack, &key.stack_id, &data.array[n + 2]);
			while (data.array[n + 2 + len])
				len++;

			/* update length of callchain */
			data.array[n] = len + 1;

			/* update sample ip with the first callchain entry */
			if (ip_pos >= 0)
				data.array[ip_pos] = data.array[n + 2];

			/* calculate sample callchain data array length */
			n += len + 2;
		}
		if (sample_type & PERF_SAMPLE_CGROUP)
			data.array[n++] = key.cgroup_id;

		size = n * sizeof(u64);
		data.hdr.size = size;
		bytes += size;

		if (perf_data_file__write(file, &data, size) < 0) {
			pr_err("failed to write perf data, error: %m\n");
			return bytes;
		}

		prev = key;
		/* increase the dummy timestamp so later samples sort after */
		tstamp++;
	}

	return bytes;
}