1 // SPDX-License-Identifier: GPL-2.0
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
7 * later analysis via perf report.
11 #include "util/build-id.h"
12 #include <subcmd/parse-options.h>
13 #include "util/parse-events.h"
14 #include "util/config.h"
16 #include "util/callchain.h"
17 #include "util/cgroup.h"
18 #include "util/header.h"
19 #include "util/event.h"
20 #include "util/evlist.h"
21 #include "util/evsel.h"
22 #include "util/debug.h"
23 #include "util/mmap.h"
24 #include "util/target.h"
25 #include "util/session.h"
26 #include "util/tool.h"
27 #include "util/symbol.h"
28 #include "util/record.h"
29 #include "util/cpumap.h"
30 #include "util/thread_map.h"
31 #include "util/data.h"
32 #include "util/perf_regs.h"
33 #include "util/auxtrace.h"
35 #include "util/parse-branch-options.h"
36 #include "util/parse-regs-options.h"
37 #include "util/perf_api_probe.h"
38 #include "util/llvm-utils.h"
39 #include "util/bpf-loader.h"
40 #include "util/trigger.h"
41 #include "util/perf-hooks.h"
42 #include "util/cpu-set-sched.h"
43 #include "util/synthetic-events.h"
44 #include "util/time-utils.h"
45 #include "util/units.h"
46 #include "util/bpf-event.h"
47 #include "util/util.h"
49 #include "util/clockid.h"
50 #include "util/pmu-hybrid.h"
51 #include "util/evlist-hybrid.h"
63 #ifdef HAVE_EVENTFD_SUPPORT
64 #include <sys/eventfd.h>
68 #include <sys/types.h>
71 #include <linux/err.h>
72 #include <linux/string.h>
73 #include <linux/time64.h>
74 #include <linux/zalloc.h>
75 #include <linux/bitmap.h>
78 struct switch_output {
91 struct perf_tool tool;
92 struct record_opts opts;
94 struct perf_data data;
95 struct auxtrace_record *itr;
96 struct evlist *evlist;
97 struct perf_session *session;
98 struct evlist *sb_evlist;
101 bool switch_output_event_set;
104 bool no_buildid_cache;
105 bool no_buildid_cache_set;
108 bool timestamp_filename;
109 bool timestamp_boundary;
110 struct switch_output switch_output;
111 unsigned long long samples;
112 struct mmap_cpu_mask affinity_mask;
113 unsigned long output_max_size; /* = 0: unlimited */
116 static volatile int done;
118 static volatile int auxtrace_record__snapshot_started;
119 static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
120 static DEFINE_TRIGGER(switch_output_trigger);
122 static const char *affinity_tags[PERF_AFFINITY_MAX] = {
126 static bool switch_output_signal(struct record *rec)
128 return rec->switch_output.signal &&
129 trigger_is_ready(&switch_output_trigger);
132 static bool switch_output_size(struct record *rec)
134 return rec->switch_output.size &&
135 trigger_is_ready(&switch_output_trigger) &&
136 (rec->bytes_written >= rec->switch_output.size);
139 static bool switch_output_time(struct record *rec)
141 return rec->switch_output.time &&
142 trigger_is_ready(&switch_output_trigger);
145 static bool record__output_max_size_exceeded(struct record *rec)
147 return rec->output_max_size &&
148 (rec->bytes_written >= rec->output_max_size);
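/*
 * A minimal sketch of the trigger lifecycle behind the switch_output_*
 * helpers above, using only the names from util/trigger.h that appear
 * in this file:
 *
 *	DEFINE_TRIGGER(t);
 *	trigger_on(&t);			enable the trigger (setup time)
 *	trigger_ready(&t);		arm it before the event loop
 *	if (trigger_is_ready(&t))
 *		trigger_hit(&t);	e.g. a size/time threshold crossed
 *	if (trigger_is_hit(&t))		handle the hit, then ...
 *		trigger_ready(&t);	... re-arm for the next round
 *	trigger_off(&t);		tear down on exit
 */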
151 static int record__write(struct record *rec, struct mmap *map __maybe_unused,
152 void *bf, size_t size)
154 struct perf_data_file *file = &rec->session->data->file;
156 if (perf_data_file__write(file, bf, size) < 0) {
157 pr_err("failed to write perf data, error: %m\n");
161 rec->bytes_written += size;
163 if (record__output_max_size_exceeded(rec) && !done) {
164 fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
165 " stopping session ]\n",
166 rec->bytes_written >> 10);
170 if (switch_output_size(rec))
171 trigger_hit(&switch_output_trigger);
176 static int record__aio_enabled(struct record *rec);
177 static int record__comp_enabled(struct record *rec);
178 static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
179 void *src, size_t src_size);
181 #ifdef HAVE_AIO_SUPPORT
182 static int record__aio_write(struct aiocb *cblock, int trace_fd,
183 void *buf, size_t size, off_t off)
187 cblock->aio_fildes = trace_fd;
188 cblock->aio_buf = buf;
189 cblock->aio_nbytes = size;
190 cblock->aio_offset = off;
191 cblock->aio_sigevent.sigev_notify = SIGEV_NONE;
194 rc = aio_write(cblock);
197 } else if (errno != EAGAIN) {
198 cblock->aio_fildes = -1;
199 pr_err("failed to queue perf data, error: %m\n");
207 static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
213 ssize_t aio_ret, written;
215 aio_errno = aio_error(cblock);
216 if (aio_errno == EINPROGRESS)
219 written = aio_ret = aio_return(cblock);
221 if (aio_errno != EINTR)
222 pr_err("failed to write perf data, error: %m\n");
226 rem_size = cblock->aio_nbytes - written;
229 cblock->aio_fildes = -1;
231 * md->refcount is incremented in record__aio_pushfn() for
232 * every aio write request started in record__aio_push(), so
233 * decrement it because the request is now complete.
235 perf_mmap__put(&md->core);
239 * aio write request may require restart with the
240 * remainder if the kernel didn't write the whole
241 * chunk at once.
243 rem_off = cblock->aio_offset + written;
244 rem_buf = (void *)(cblock->aio_buf + written);
245 record__aio_write(cblock, cblock->aio_fildes,
246 rem_buf, rem_size, rem_off);
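/*
 * A self-contained sketch of the POSIX AIO calls used above, assuming
 * glibc's <aio.h>; error handling is elided and the busy poll stands in
 * for the timed aio_suspend() in record__aio_sync():
 *
 *	struct aiocb cb = {
 *		.aio_fildes = fd,
 *		.aio_buf    = buf,
 *		.aio_nbytes = len,
 *		.aio_offset = off,
 *	};
 *	cb.aio_sigevent.sigev_notify = SIGEV_NONE;
 *
 *	if (aio_write(&cb) == 0) {
 *		while (aio_error(&cb) == EINPROGRESS)
 *			usleep(1000);
 *		written = aio_return(&cb);	bytes written, or -1
 *	}
 */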
253 static int record__aio_sync(struct mmap *md, bool sync_all)
255 struct aiocb **aiocb = md->aio.aiocb;
256 struct aiocb *cblocks = md->aio.cblocks;
257 struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
262 for (i = 0; i < md->aio.nr_cblocks; ++i) {
263 if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
270 * A started aio write is not complete yet,
271 * so it has to be waited on before the
272 * next allocation.
274 aiocb[i] = &cblocks[i];
281 while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
282 if (!(errno == EAGAIN || errno == EINTR))
283 pr_err("failed to sync perf data, error: %m\n");
294 static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
296 struct record_aio *aio = to;
299 * map->core.base data pointed to by buf is copied into a free map->aio.data[] buffer
300 * to release space in the kernel buffer as fast as possible, calling
301 * perf_mmap__consume() from the perf_mmap__push() function.
303 * That lets the kernel proceed with storing more profiling data into
304 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
306 * Copying can be done in two steps in case the chunk of profiling data
307 * crosses the upper bound of the kernel buffer. In this case we first move
308 * the part of the data from map->start till the upper bound and then the remainder
309 * from the beginning of the kernel buffer till the end of the data chunk.
312 if (record__comp_enabled(aio->rec)) {
313 size = zstd_compress(aio->rec->session, aio->data + aio->size,
314 mmap__mmap_len(map) - aio->size,
317 memcpy(aio->data + aio->size, buf, size);
322 * Increment map->refcount to guard the map->aio.data[] buffer
323 * from premature deallocation, because the map object can be
324 * released earlier than the aio write request started on the
325 * map->aio.data[] buffer has completed.
327 * perf_mmap__put() is done at record__aio_complete()
328 * after the started aio request completes, or at record__aio_push()
329 * if the request failed to start.
331 perf_mmap__get(&map->core);
339 static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
342 int trace_fd = rec->session->data->file.fd;
343 struct record_aio aio = { .rec = rec, .size = 0 };
346 * Call record__aio_sync() to wait until a map->aio.data[] buffer
347 * becomes available after the previous aio write operation.
350 idx = record__aio_sync(map, false);
351 aio.data = map->aio.data[idx];
352 ret = perf_mmap__push(map, &aio, record__aio_pushfn);
353 if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
357 ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
360 rec->bytes_written += aio.size;
361 if (switch_output_size(rec))
362 trigger_hit(&switch_output_trigger);
365 * Decrement the map->refcount taken in record__aio_pushfn()
366 * if the record__aio_write() operation failed to start; otherwise
367 * map->refcount is decremented in record__aio_complete() after
368 * the aio write operation finishes successfully.
370 perf_mmap__put(&map->core);
376 static off_t record__aio_get_pos(int trace_fd)
378 return lseek(trace_fd, 0, SEEK_CUR);
381 static void record__aio_set_pos(int trace_fd, off_t pos)
383 lseek(trace_fd, pos, SEEK_SET);
386 static void record__aio_mmap_read_sync(struct record *rec)
389 struct evlist *evlist = rec->evlist;
390 struct mmap *maps = evlist->mmap;
392 if (!record__aio_enabled(rec))
395 for (i = 0; i < evlist->core.nr_mmaps; i++) {
396 struct mmap *map = &maps[i];
399 record__aio_sync(map, true);
403 static int nr_cblocks_default = 1;
404 static int nr_cblocks_max = 4;
406 static int record__aio_parse(const struct option *opt,
410 struct record_opts *opts = (struct record_opts *)opt->value;
413 opts->nr_cblocks = 0;
416 opts->nr_cblocks = strtol(str, NULL, 0);
417 if (!opts->nr_cblocks)
418 opts->nr_cblocks = nr_cblocks_default;
423 #else /* HAVE_AIO_SUPPORT */
424 static int nr_cblocks_max = 0;
426 static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
427 off_t *off __maybe_unused)
432 static off_t record__aio_get_pos(int trace_fd __maybe_unused)
437 static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
441 static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
446 static int record__aio_enabled(struct record *rec)
448 return rec->opts.nr_cblocks > 0;
451 #define MMAP_FLUSH_DEFAULT 1
452 static int record__mmap_flush_parse(const struct option *opt,
457 struct record_opts *opts = (struct record_opts *)opt->value;
458 static struct parse_tag tags[] = {
459 { .tag = 'B', .mult = 1 },
460 { .tag = 'K', .mult = 1 << 10 },
461 { .tag = 'M', .mult = 1 << 20 },
462 { .tag = 'G', .mult = 1 << 30 },
470 opts->mmap_flush = parse_tag_value(str, tags);
471 if (opts->mmap_flush == (int)-1)
472 opts->mmap_flush = strtol(str, NULL, 0);
475 if (!opts->mmap_flush)
476 opts->mmap_flush = MMAP_FLUSH_DEFAULT;
478 flush_max = evlist__mmap_size(opts->mmap_pages);
480 if (opts->mmap_flush > flush_max)
481 opts->mmap_flush = flush_max;
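/*
 * Worked examples for the parser above (B/K/M/G come from the tags[]
 * table; untagged values fall back to strtol(), zero selects the
 * default, and the result is clamped to the mmap size):
 *
 *	--mmap-flush=1024	->	mmap_flush = 1024 bytes
 *	--mmap-flush=16M	->	mmap_flush = 16 * (1 << 20)
 *	--mmap-flush=0		->	mmap_flush = MMAP_FLUSH_DEFAULT (1)
 */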
486 #ifdef HAVE_ZSTD_SUPPORT
487 static unsigned int comp_level_default = 1;
489 static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
491 struct record_opts *opts = opt->value;
494 opts->comp_level = 0;
497 opts->comp_level = strtol(str, NULL, 0);
498 if (!opts->comp_level)
499 opts->comp_level = comp_level_default;
505 static unsigned int comp_level_max = 22;
507 static int record__comp_enabled(struct record *rec)
509 return rec->opts.comp_level > 0;
512 static int process_synthesized_event(struct perf_tool *tool,
513 union perf_event *event,
514 struct perf_sample *sample __maybe_unused,
515 struct machine *machine __maybe_unused)
517 struct record *rec = container_of(tool, struct record, tool);
518 return record__write(rec, NULL, event, event->header.size);
521 static int process_locked_synthesized_event(struct perf_tool *tool,
522 union perf_event *event,
523 struct perf_sample *sample __maybe_unused,
524 struct machine *machine __maybe_unused)
526 static pthread_mutex_t synth_lock = PTHREAD_MUTEX_INITIALIZER;
529 pthread_mutex_lock(&synth_lock);
530 ret = process_synthesized_event(tool, event, sample, machine);
531 pthread_mutex_unlock(&synth_lock);
535 static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
537 struct record *rec = to;
539 if (record__comp_enabled(rec)) {
540 size = zstd_compress(rec->session, map->data, mmap__mmap_len(map), bf, size);
545 return record__write(rec, map, bf, size);
548 static volatile int signr = -1;
549 static volatile int child_finished;
550 #ifdef HAVE_EVENTFD_SUPPORT
551 static int done_fd = -1;
554 static void sig_handler(int sig)
562 #ifdef HAVE_EVENTFD_SUPPORT
566 * It is possible for this signal handler to run after done is checked
567 * in the main loop, but before the perf counter fds are polled. If this
568 * happens, the poll() will continue to wait even though done is set,
569 * and will only break out if either another signal is received, or the
570 * counters are ready for read. To ensure the poll() doesn't sleep when
571 * done is set, use an eventfd (done_fd) to wake up the poll().
573 if (write(done_fd, &tmp, sizeof(tmp)) < 0)
574 pr_err("failed to signal wakeup fd, error: %m\n");
576 #endif // HAVE_EVENTFD_SUPPORT
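/*
 * A minimal sketch of the eventfd wakeup pattern described above,
 * assuming Linux eventfd(2); the names efd/pfd are illustrative:
 *
 *	int efd = eventfd(0, EFD_NONBLOCK);
 *	struct pollfd pfd = { .fd = efd, .events = POLLIN };
 *
 *	In the signal handler:
 *		u64 one = 1;
 *		write(efd, &one, sizeof(one));
 *
 *	In the main loop:
 *		poll(&pfd, 1, -1);		returns once the handler wrote
 *		u64 cnt;
 *		read(efd, &cnt, sizeof(cnt));	drain the counter
 */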
579 static void sigsegv_handler(int sig)
581 perf_hooks__recover();
582 sighandler_dump_stack(sig);
585 static void record__sig_exit(void)
590 signal(signr, SIG_DFL);
594 #ifdef HAVE_AUXTRACE_SUPPORT
596 static int record__process_auxtrace(struct perf_tool *tool,
598 union perf_event *event, void *data1,
599 size_t len1, void *data2, size_t len2)
601 struct record *rec = container_of(tool, struct record, tool);
602 struct perf_data *data = &rec->data;
606 if (!perf_data__is_pipe(data) && perf_data__is_single_file(data)) {
608 int fd = perf_data__fd(data);
611 file_offset = lseek(fd, 0, SEEK_CUR);
612 if (file_offset == -1)
614 err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
620 /* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
621 padding = (len1 + len2) & 7;
623 padding = 8 - padding;
625 record__write(rec, map, event, event->header.size);
626 record__write(rec, map, data1, len1);
628 record__write(rec, map, data2, len2);
629 record__write(rec, map, &pad, padding);
634 static int record__auxtrace_mmap_read(struct record *rec,
639 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
640 record__process_auxtrace);
650 static int record__auxtrace_mmap_read_snapshot(struct record *rec,
655 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
656 record__process_auxtrace,
657 rec->opts.auxtrace_snapshot_size);
667 static int record__auxtrace_read_snapshot_all(struct record *rec)
672 for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
673 struct mmap *map = &rec->evlist->mmap[i];
675 if (!map->auxtrace_mmap.base)
678 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
687 static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
689 pr_debug("Recording AUX area tracing snapshot\n");
690 if (record__auxtrace_read_snapshot_all(rec) < 0) {
691 trigger_error(&auxtrace_snapshot_trigger);
693 if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
694 trigger_error(&auxtrace_snapshot_trigger);
696 trigger_ready(&auxtrace_snapshot_trigger);
700 static int record__auxtrace_snapshot_exit(struct record *rec)
702 if (trigger_is_error(&auxtrace_snapshot_trigger))
705 if (!auxtrace_record__snapshot_started &&
706 auxtrace_record__snapshot_start(rec->itr))
709 record__read_auxtrace_snapshot(rec, true);
710 if (trigger_is_error(&auxtrace_snapshot_trigger))
716 static int record__auxtrace_init(struct record *rec)
721 rec->itr = auxtrace_record__init(rec->evlist, &err);
726 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
727 rec->opts.auxtrace_snapshot_opts);
731 err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts,
732 rec->opts.auxtrace_sample_opts);
736 auxtrace_regroup_aux_output(rec->evlist);
738 return auxtrace_parse_filters(rec->evlist);
744 int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
745 struct mmap *map __maybe_unused)
751 void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
752 bool on_exit __maybe_unused)
757 int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
763 int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
768 static int record__auxtrace_init(struct record *rec __maybe_unused)
775 static int record__config_text_poke(struct evlist *evlist)
780 /* Nothing to do if text poke is already configured */
781 evlist__for_each_entry(evlist, evsel) {
782 if (evsel->core.attr.text_poke)
786 err = parse_events(evlist, "dummy:u", NULL);
790 evsel = evlist__last(evlist);
792 evsel->core.attr.freq = 0;
793 evsel->core.attr.sample_period = 1;
794 evsel->core.attr.text_poke = 1;
795 evsel->core.attr.ksymbol = 1;
797 evsel->core.system_wide = true;
798 evsel->no_aux_samples = true;
799 evsel->immediate = true;
801 /* Text poke must be collected on all CPUs */
802 perf_cpu_map__put(evsel->core.own_cpus);
803 evsel->core.own_cpus = perf_cpu_map__new(NULL);
804 perf_cpu_map__put(evsel->core.cpus);
805 evsel->core.cpus = perf_cpu_map__get(evsel->core.own_cpus);
807 evsel__set_sample_bit(evsel, TIME);
812 static bool record__kcore_readable(struct machine *machine)
814 char kcore[PATH_MAX];
817 scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir);
819 fd = open(kcore, O_RDONLY);
828 static int record__kcore_copy(struct machine *machine, struct perf_data *data)
830 char from_dir[PATH_MAX];
831 char kcore_dir[PATH_MAX];
834 snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir);
836 ret = perf_data__make_kcore_dir(data, kcore_dir, sizeof(kcore_dir));
840 return kcore_copy(from_dir, kcore_dir);
843 static int record__mmap_evlist(struct record *rec,
844 struct evlist *evlist)
846 struct record_opts *opts = &rec->opts;
847 bool auxtrace_overwrite = opts->auxtrace_snapshot_mode ||
848 opts->auxtrace_sample_mode;
851 if (opts->affinity != PERF_AFFINITY_SYS)
852 cpu__setup_cpunode_map();
854 if (evlist__mmap_ex(evlist, opts->mmap_pages,
855 opts->auxtrace_mmap_pages,
857 opts->nr_cblocks, opts->affinity,
858 opts->mmap_flush, opts->comp_level) < 0) {
859 if (errno == EPERM) {
860 pr_err("Permission error mapping pages.\n"
861 "Consider increasing "
862 "/proc/sys/kernel/perf_event_mlock_kb,\n"
863 "or try again with a smaller value of -m/--mmap_pages.\n"
864 "(current value: %u,%u)\n",
865 opts->mmap_pages, opts->auxtrace_mmap_pages);
868 pr_err("failed to mmap with %d (%s)\n", errno,
869 str_error_r(errno, msg, sizeof(msg)));
879 static int record__mmap(struct record *rec)
881 return record__mmap_evlist(rec, rec->evlist);
884 static int record__open(struct record *rec)
888 struct evlist *evlist = rec->evlist;
889 struct perf_session *session = rec->session;
890 struct record_opts *opts = &rec->opts;
894 * For initial_delay or system wide, we need to add a dummy event so
895 * that we can track PERF_RECORD_MMAP to cover the delay of waiting or
896 * event synthesis.
898 if (opts->initial_delay || target__has_cpu(&opts->target)) {
899 pos = evlist__get_tracking_event(evlist);
900 if (!evsel__is_dummy_event(pos)) {
901 /* Set up dummy event. */
902 if (evlist__add_dummy(evlist))
904 pos = evlist__last(evlist);
905 evlist__set_tracking_event(evlist, pos);
909 * Enable the dummy event when the process is forked for
910 * initial_delay, or immediately for system wide.
912 if (opts->initial_delay && !pos->immediate)
913 pos->core.attr.enable_on_exec = 1;
918 evlist__config(evlist, opts, &callchain_param);
920 evlist__for_each_entry(evlist, pos) {
922 if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
923 if (evsel__fallback(pos, errno, msg, sizeof(msg))) {
925 ui__warning("%s\n", msg);
928 if ((errno == EINVAL || errno == EBADF) &&
929 pos->leader != pos &&
931 pos = evlist__reset_weak_group(evlist, pos, true);
935 evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg));
936 ui__error("%s\n", msg);
940 pos->supported = true;
943 if (symbol_conf.kptr_restrict && !evlist__exclude_kernel(evlist)) {
945 "WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
946 "check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
947 "Samples in kernel functions may not be resolved if a suitable vmlinux\n"
948 "file is not found in the buildid cache or in the vmlinux path.\n\n"
949 "Samples in kernel modules won't be resolved at all.\n\n"
950 "If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
951 "even with a suitable vmlinux or kallsyms file.\n\n");
954 if (evlist__apply_filters(evlist, &pos)) {
955 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
956 pos->filter, evsel__name(pos), errno,
957 str_error_r(errno, msg, sizeof(msg)));
962 rc = record__mmap(rec);
966 session->evlist = evlist;
967 perf_session__set_id_hdr_size(session);
972 static int process_sample_event(struct perf_tool *tool,
973 union perf_event *event,
974 struct perf_sample *sample,
976 struct machine *machine)
978 struct record *rec = container_of(tool, struct record, tool);
980 if (rec->evlist->first_sample_time == 0)
981 rec->evlist->first_sample_time = sample->time;
983 rec->evlist->last_sample_time = sample->time;
985 if (rec->buildid_all)
989 return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
992 static int process_buildids(struct record *rec)
994 struct perf_session *session = rec->session;
996 if (perf_data__size(&rec->data) == 0)
1000 * During this process, it'll load the kernel map and replace the
1001 * dso->long_name with the real pathname it found. In this case
1002 * we prefer a vmlinux path like
1003 * /lib/modules/3.16.4/build/vmlinux
1005 * rather than the build-id path (in the debug directory):
1006 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
1008 symbol_conf.ignore_vmlinux_buildid = true;
1011 * If --buildid-all is given, it marks all DSOs regardless of hits,
1012 * so there is no need to process samples. But if timestamp_boundary is
1013 * enabled, it still needs to walk all samples to get the timestamps of
1014 * the first/last samples.
1016 if (rec->buildid_all && !rec->timestamp_boundary)
1017 rec->tool.sample = NULL;
1019 return perf_session__process_events(session);
1022 static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
1025 struct perf_tool *tool = data;
1027 * As for the guest kernel, when processing the record & report
1028 * subcommands we arrange the module mmaps prior to the guest kernel
1029 * mmap and trigger a preload of the dso, because by default guest
1030 * module symbols are loaded from guest kallsyms instead of
1031 * /lib/modules/XXX/XXX. This method avoids missing symbols when the
1032 * first address is in a module instead of in the guest kernel.
1034 err = perf_event__synthesize_modules(tool, process_synthesized_event,
1037 pr_err("Couldn't record guest kernel [%d]'s reference"
1038 " relocation symbol.\n", machine->pid);
1041 * We use _stext for the guest kernel because the guest kernel's
1042 * /proc/kallsyms sometimes has no _text.
1044 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
1047 pr_err("Couldn't record guest kernel [%d]'s reference"
1048 " relocation symbol.\n", machine->pid);
1051 static struct perf_event_header finished_round_event = {
1052 .size = sizeof(struct perf_event_header),
1053 .type = PERF_RECORD_FINISHED_ROUND,
1056 static void record__adjust_affinity(struct record *rec, struct mmap *map)
1058 if (rec->opts.affinity != PERF_AFFINITY_SYS &&
1059 !bitmap_equal(rec->affinity_mask.bits, map->affinity_mask.bits,
1060 rec->affinity_mask.nbits)) {
1061 bitmap_zero(rec->affinity_mask.bits, rec->affinity_mask.nbits);
1062 bitmap_or(rec->affinity_mask.bits, rec->affinity_mask.bits,
1063 map->affinity_mask.bits, rec->affinity_mask.nbits);
1064 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&rec->affinity_mask),
1065 (cpu_set_t *)rec->affinity_mask.bits);
1067 mmap_cpu_mask__scnprintf(&rec->affinity_mask, "thread");
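/*
 * A minimal sketch of the affinity call used above, assuming the glibc
 * CPU_* macros; pid 0 means the calling thread:
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);			run on CPU 2 only
 *	sched_setaffinity(0, sizeof(set), &set);
 */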
1071 static size_t process_comp_header(void *record, size_t increment)
1073 struct perf_record_compressed *event = record;
1074 size_t size = sizeof(*event);
1077 event->header.size += increment;
1081 event->header.type = PERF_RECORD_COMPRESSED;
1082 event->header.size = size;
1087 static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
1088 void *src, size_t src_size)
1091 size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
1093 compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
1094 max_record_size, process_comp_header);
1096 session->bytes_transferred += src_size;
1097 session->bytes_compressed += compressed;
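/*
 * How a compressed record is framed by the two functions above,
 * schematically (struct layout as in libperf's event.h, assuming the
 * usual definition):
 *
 *	struct perf_record_compressed {
 *		struct perf_event_header header;   .type = PERF_RECORD_COMPRESSED
 *		char data[];                       one zstd frame's bytes
 *	};
 *
 * process_comp_header() is first called with increment == 0 to start a
 * record (setting the type and size = sizeof(*event)), then once per
 * produced chunk to grow header.size, keeping each record below
 * PERF_SAMPLE_MAX_SIZE.
 */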
1102 static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
1103 bool overwrite, bool synch)
1105 u64 bytes_written = rec->bytes_written;
1109 int trace_fd = rec->data.file.fd;
1115 maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
1119 if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
1122 if (record__aio_enabled(rec))
1123 off = record__aio_get_pos(trace_fd);
1125 for (i = 0; i < evlist->core.nr_mmaps; i++) {
1127 struct mmap *map = &maps[i];
1129 if (map->core.base) {
1130 record__adjust_affinity(rec, map);
1132 flush = map->core.flush;
1133 map->core.flush = 1;
1135 if (!record__aio_enabled(rec)) {
1136 if (perf_mmap__push(map, rec, record__pushfn) < 0) {
1138 map->core.flush = flush;
1143 if (record__aio_push(rec, map, &off) < 0) {
1144 record__aio_set_pos(trace_fd, off);
1146 map->core.flush = flush;
1152 map->core.flush = flush;
1155 if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
1156 !rec->opts.auxtrace_sample_mode &&
1157 record__auxtrace_mmap_read(rec, map) != 0) {
1163 if (record__aio_enabled(rec))
1164 record__aio_set_pos(trace_fd, off);
1167 * Mark the round finished in case we wrote
1168 * at least one event.
1170 if (bytes_written != rec->bytes_written)
1171 rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));
1174 evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
1179 static int record__mmap_read_all(struct record *rec, bool synch)
1183 err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
1187 return record__mmap_read_evlist(rec, rec->evlist, true, synch);
1190 static void record__init_features(struct record *rec)
1192 struct perf_session *session = rec->session;
1195 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
1196 perf_header__set_feat(&session->header, feat);
1198 if (rec->no_buildid)
1199 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
1201 if (!have_tracepoints(&rec->evlist->core.entries))
1202 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
1204 if (!rec->opts.branch_stack)
1205 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
1207 if (!rec->opts.full_auxtrace)
1208 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
1210 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
1211 perf_header__clear_feat(&session->header, HEADER_CLOCKID);
1213 if (!rec->opts.use_clockid)
1214 perf_header__clear_feat(&session->header, HEADER_CLOCK_DATA);
1216 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
1217 if (!record__comp_enabled(rec))
1218 perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
1220 perf_header__clear_feat(&session->header, HEADER_STAT);
1224 record__finish_output(struct record *rec)
1226 struct perf_data *data = &rec->data;
1227 int fd = perf_data__fd(data);
1232 rec->session->header.data_size += rec->bytes_written;
1233 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
1235 if (!rec->no_buildid) {
1236 process_buildids(rec);
1238 if (rec->buildid_all)
1239 dsos__hit_all(rec->session);
1241 perf_session__write_header(rec->session, rec->evlist, fd, true);
1246 static int record__synthesize_workload(struct record *rec, bool tail)
1249 struct perf_thread_map *thread_map;
1251 if (rec->opts.tail_synthesize != tail)
1254 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
1255 if (thread_map == NULL)
1258 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
1259 process_synthesized_event,
1260 &rec->session->machines.host,
1261 rec->opts.sample_address);
1262 perf_thread_map__put(thread_map);
1266 static int record__synthesize(struct record *rec, bool tail);
1269 record__switch_output(struct record *rec, bool at_exit)
1271 struct perf_data *data = &rec->data;
1275 /* Same size: "2015122520103046" */
1276 char timestamp[] = "InvalidTimestamp";
1278 record__aio_mmap_read_sync(rec);
1280 record__synthesize(rec, true);
1281 if (target__none(&rec->opts.target))
1282 record__synthesize_workload(rec, true);
1285 record__finish_output(rec);
1286 err = fetch_current_timestamp(timestamp, sizeof(timestamp));
1288 pr_err("Failed to get current timestamp\n");
1292 fd = perf_data__switch(data, timestamp,
1293 rec->session->header.data_offset,
1294 at_exit, &new_filename);
1295 if (fd >= 0 && !at_exit) {
1296 rec->bytes_written = 0;
1297 rec->session->header.data_size = 0;
1301 fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
1302 data->path, timestamp);
1304 if (rec->switch_output.num_files) {
1305 int n = rec->switch_output.cur_file + 1;
1307 if (n >= rec->switch_output.num_files)
1309 rec->switch_output.cur_file = n;
1310 if (rec->switch_output.filenames[n]) {
1311 remove(rec->switch_output.filenames[n]);
1312 zfree(&rec->switch_output.filenames[n]);
1314 rec->switch_output.filenames[n] = new_filename;
1319 /* Output tracking events */
1321 record__synthesize(rec, false);
1324 * In 'perf record --switch-output' without -a,
1325 * record__synthesize() in record__switch_output() won't
1326 * generate tracking events because there's no thread_map
1327 * in the evlist, so the newly created perf.data wouldn't
1328 * contain mmap and comm information.
1329 * Create a fake thread_map and directly call
1330 * perf_event__synthesize_thread_map() for those events.
1332 if (target__none(&rec->opts.target))
1333 record__synthesize_workload(rec, false);
1338 static volatile int workload_exec_errno;
1341 * evlist__prepare_workload will send a SIGUSR1
1342 * if the fork fails, since we asked for it by setting
1343 * its want_signal parameter to true.
1345 static void workload_exec_failed_signal(int signo __maybe_unused,
1347 void *ucontext __maybe_unused)
1349 workload_exec_errno = info->si_value.sival_int;
1354 static void snapshot_sig_handler(int sig);
1355 static void alarm_sig_handler(int sig);
1357 static const struct perf_event_mmap_page *evlist__pick_pc(struct evlist *evlist)
1360 if (evlist->mmap && evlist->mmap[0].core.base)
1361 return evlist->mmap[0].core.base;
1362 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base)
1363 return evlist->overwrite_mmap[0].core.base;
1368 static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
1370 const struct perf_event_mmap_page *pc = evlist__pick_pc(rec->evlist);
1376 static int record__synthesize(struct record *rec, bool tail)
1378 struct perf_session *session = rec->session;
1379 struct machine *machine = &session->machines.host;
1380 struct perf_data *data = &rec->data;
1381 struct record_opts *opts = &rec->opts;
1382 struct perf_tool *tool = &rec->tool;
1383 int fd = perf_data__fd(data);
1385 event_op f = process_synthesized_event;
1387 if (rec->opts.tail_synthesize != tail)
1390 if (data->is_pipe) {
1392 * We need to synthesize events first, because some
1393 * features work on top of them (on the report side).
1395 err = perf_event__synthesize_attrs(tool, rec->evlist,
1396 process_synthesized_event);
1398 pr_err("Couldn't synthesize attrs.\n");
1402 err = perf_event__synthesize_features(tool, session, rec->evlist,
1403 process_synthesized_event);
1405 pr_err("Couldn't synthesize features.\n");
1409 if (have_tracepoints(&rec->evlist->core.entries)) {
1411 * FIXME err <= 0 here actually means that
1412 * there were no tracepoints, so it's not really
1413 * an error, just that we don't need to
1414 * synthesize anything. We really have to
1415 * return this more properly and also
1416 * propagate errors that currently are calling die().
1418 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
1419 process_synthesized_event);
1421 pr_err("Couldn't record tracing data.\n");
1424 rec->bytes_written += err;
1428 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
1429 process_synthesized_event, machine);
1433 /* Synthesize id_index before auxtrace_info */
1434 if (rec->opts.auxtrace_sample_mode) {
1435 err = perf_event__synthesize_id_index(tool,
1436 process_synthesized_event,
1437 session->evlist, machine);
1442 if (rec->opts.full_auxtrace) {
1443 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
1444 session, process_synthesized_event);
1449 if (!evlist__exclude_kernel(rec->evlist)) {
1450 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
1452 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
1453 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1454 "Check /proc/kallsyms permission or run as root.\n");
1456 err = perf_event__synthesize_modules(tool, process_synthesized_event,
1458 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
1459 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1460 "Check /proc/modules permission or run as root.\n");
1464 machines__process_guests(&session->machines,
1465 perf_event__synthesize_guest_os, tool);
1468 err = perf_event__synthesize_extra_attr(&rec->tool,
1470 process_synthesized_event,
1475 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
1476 process_synthesized_event,
1479 pr_err("Couldn't synthesize thread map.\n");
1483 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
1484 process_synthesized_event, NULL);
1486 pr_err("Couldn't synthesize cpu map.\n");
1490 err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
1493 pr_warning("Couldn't synthesize bpf events.\n");
1495 err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
1498 pr_warning("Couldn't synthesize cgroup events.\n");
1500 if (rec->opts.nr_threads_synthesize > 1) {
1501 perf_set_multithreaded();
1502 f = process_locked_synthesized_event;
1505 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
1506 f, opts->sample_address,
1507 rec->opts.nr_threads_synthesize);
1509 if (rec->opts.nr_threads_synthesize > 1)
1510 perf_set_singlethreaded();
1516 static int record__process_signal_event(union perf_event *event __maybe_unused, void *data)
1518 struct record *rec = data;
1519 pthread_kill(rec->thread_id, SIGUSR2);
1523 static int record__setup_sb_evlist(struct record *rec)
1525 struct record_opts *opts = &rec->opts;
1527 if (rec->sb_evlist != NULL) {
1529 * We get here if --switch-output-event populated the
1530 * sb_evlist, so associate a callback that will send a SIGUSR2
1531 * to the main thread.
1533 evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec);
1534 rec->thread_id = pthread_self();
1536 #ifdef HAVE_LIBBPF_SUPPORT
1537 if (!opts->no_bpf_event) {
1538 if (rec->sb_evlist == NULL) {
1539 rec->sb_evlist = evlist__new();
1541 if (rec->sb_evlist == NULL) {
1542 pr_err("Couldn't create side band evlist.\n");
1547 if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) {
1548 pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n");
1553 if (evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
1554 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1555 opts->no_bpf_event = true;
1561 static int record__init_clock(struct record *rec)
1563 struct perf_session *session = rec->session;
1564 struct timespec ref_clockid;
1565 struct timeval ref_tod;
1568 if (!rec->opts.use_clockid)
1571 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1572 session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns;
1574 session->header.env.clock.clockid = rec->opts.clockid;
1576 if (gettimeofday(&ref_tod, NULL) != 0) {
1577 pr_err("gettimeofday failed, cannot set reference time.\n");
1581 if (clock_gettime(rec->opts.clockid, &ref_clockid)) {
1582 pr_err("clock_gettime failed, cannot set reference time.\n");
1586 ref = (u64) ref_tod.tv_sec * NSEC_PER_SEC +
1587 (u64) ref_tod.tv_usec * NSEC_PER_USEC;
1589 session->header.env.clock.tod_ns = ref;
1591 ref = (u64) ref_clockid.tv_sec * NSEC_PER_SEC +
1592 (u64) ref_clockid.tv_nsec;
1594 session->header.env.clock.clockid_ns = ref;
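/*
 * Worked example of the reference pair computed above: both clocks are
 * sampled back to back and converted to nanoseconds,
 *
 *	tod_ns     = tv_sec * NSEC_PER_SEC + tv_usec * NSEC_PER_USEC
 *	clockid_ns = tv_sec * NSEC_PER_SEC + tv_nsec
 *
 * so that a later timestamp t taken on the same clockid can be mapped to
 * wall-clock time as tod_ns + (t - clockid_ns).
 */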
1598 static void hit_auxtrace_snapshot_trigger(struct record *rec)
1600 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
1601 trigger_hit(&auxtrace_snapshot_trigger);
1602 auxtrace_record__snapshot_started = 1;
1603 if (auxtrace_record__snapshot_start(rec->itr))
1604 trigger_error(&auxtrace_snapshot_trigger);
1608 static void record__uniquify_name(struct record *rec)
1611 struct evlist *evlist = rec->evlist;
1615 if (!perf_pmu__has_hybrid())
1618 evlist__for_each_entry(evlist, pos) {
1619 if (!evsel__is_hybrid(pos))
1622 if (strchr(pos->name, '/'))
1625 ret = asprintf(&new_name, "%s/%s/",
1626 pos->pmu_name, pos->name);
1629 pos->name = new_name;
1634 static int __cmd_record(struct record *rec, int argc, const char **argv)
1638 unsigned long waking = 0;
1639 const bool forks = argc > 0;
1640 struct perf_tool *tool = &rec->tool;
1641 struct record_opts *opts = &rec->opts;
1642 struct perf_data *data = &rec->data;
1643 struct perf_session *session;
1644 bool disabled = false, draining = false;
1647 enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
1649 atexit(record__sig_exit);
1650 signal(SIGCHLD, sig_handler);
1651 signal(SIGINT, sig_handler);
1652 signal(SIGTERM, sig_handler);
1653 signal(SIGSEGV, sigsegv_handler);
1655 if (rec->opts.record_namespaces)
1656 tool->namespace_events = true;
1658 if (rec->opts.record_cgroup) {
1659 #ifdef HAVE_FILE_HANDLE
1660 tool->cgroup_events = true;
1662 pr_err("cgroup tracking is not supported\n");
1667 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
1668 signal(SIGUSR2, snapshot_sig_handler);
1669 if (rec->opts.auxtrace_snapshot_mode)
1670 trigger_on(&auxtrace_snapshot_trigger);
1671 if (rec->switch_output.enabled)
1672 trigger_on(&switch_output_trigger);
1674 signal(SIGUSR2, SIG_IGN);
1677 session = perf_session__new(data, false, tool);
1678 if (IS_ERR(session)) {
1679 pr_err("Perf session creation failed.\n");
1680 return PTR_ERR(session);
1683 fd = perf_data__fd(data);
1684 rec->session = session;
1686 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
1687 pr_err("Compression initialization failed.\n");
1690 #ifdef HAVE_EVENTFD_SUPPORT
1691 done_fd = eventfd(0, EFD_NONBLOCK);
1693 pr_err("Failed to create wakeup eventfd, error: %m\n");
1695 goto out_delete_session;
1697 err = evlist__add_wakeup_eventfd(rec->evlist, done_fd);
1699 pr_err("Failed to add wakeup eventfd to poll list\n");
1701 goto out_delete_session;
1703 #endif // HAVE_EVENTFD_SUPPORT
1705 session->header.env.comp_type = PERF_COMP_ZSTD;
1706 session->header.env.comp_level = rec->opts.comp_level;
1708 if (rec->opts.kcore &&
1709 !record__kcore_readable(&session->machines.host)) {
1710 pr_err("ERROR: kcore is not readable.\n");
1714 if (record__init_clock(rec))
1717 record__init_features(rec);
1720 err = evlist__prepare_workload(rec->evlist, &opts->target, argv, data->is_pipe,
1721 workload_exec_failed_signal);
1723 pr_err("Couldn't run the workload!\n");
1725 goto out_delete_session;
1730 * If we have just a single event and are sending data
1731 * through a pipe, we need to force ID allocation,
1732 * because we synthesize the event name through the pipe
1733 * and need the ID for that.
1735 if (data->is_pipe && rec->evlist->core.nr_entries == 1)
1736 rec->opts.sample_id = true;
1738 record__uniquify_name(rec);
1740 if (record__open(rec) != 0) {
1744 session->header.env.comp_mmap_len = session->evlist->core.mmap_len;
1746 if (rec->opts.kcore) {
1747 err = record__kcore_copy(&session->machines.host, data);
1749 pr_err("ERROR: Failed to copy kcore\n");
1754 err = bpf__apply_obj_config();
1756 char errbuf[BUFSIZ];
1758 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
1759 pr_err("ERROR: Apply config to BPF failed: %s\n",
1765 * Normally perf_session__new would do this, but it doesn't have the
1766 * evlist.
1768 if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) {
1769 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
1770 rec->tool.ordered_events = false;
1773 if (!rec->evlist->nr_groups)
1774 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
1776 if (data->is_pipe) {
1777 err = perf_header__write_pipe(fd);
1781 err = perf_session__write_header(session, rec->evlist, fd, false);
1787 if (!rec->no_buildid
1788 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
1789 pr_err("Couldn't generate buildids. "
1790 "Use --no-buildid to profile anyway.\n");
1794 err = record__setup_sb_evlist(rec);
1798 err = record__synthesize(rec, false);
1802 if (rec->realtime_prio) {
1803 struct sched_param param;
1805 param.sched_priority = rec->realtime_prio;
1806 if (sched_setscheduler(0, SCHED_FIFO, ¶m)) {
1807 pr_err("Could not set realtime priority.\n");
1814 * When perf is starting the traced process, all the events
1815 * (apart from group members) have enable_on_exec=1 set,
1816 * so don't spoil it by prematurely enabling them.
1818 if (!target__none(&opts->target) && !opts->initial_delay)
1819 evlist__enable(rec->evlist);
1825 struct machine *machine = &session->machines.host;
1826 union perf_event *event;
1829 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1830 if (event == NULL) {
1836 * Some H/W events are generated before the COMM event,
1837 * which is emitted during exec(), so perf script
1838 * cannot see a correct process name for those events.
1839 * Synthesize a COMM event to prevent that.
1841 tgid = perf_event__synthesize_comm(tool, event,
1842 rec->evlist->workload.pid,
1843 process_synthesized_event,
1850 event = malloc(sizeof(event->namespaces) +
1851 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1852 machine->id_hdr_size);
1853 if (event == NULL) {
1859 * Synthesize NAMESPACES event for the command specified.
1861 perf_event__synthesize_namespaces(tool, event,
1862 rec->evlist->workload.pid,
1863 tgid, process_synthesized_event,
1867 evlist__start_workload(rec->evlist);
1870 if (evlist__initialize_ctlfd(rec->evlist, opts->ctl_fd, opts->ctl_fd_ack))
1873 if (opts->initial_delay) {
1874 pr_info(EVLIST_DISABLED_MSG);
1875 if (opts->initial_delay > 0) {
1876 usleep(opts->initial_delay * USEC_PER_MSEC);
1877 evlist__enable(rec->evlist);
1878 pr_info(EVLIST_ENABLED_MSG);
1882 trigger_ready(&auxtrace_snapshot_trigger);
1883 trigger_ready(&switch_output_trigger);
1884 perf_hooks__invoke_record_start();
1886 unsigned long long hits = rec->samples;
1889 * rec->evlist->bkw_mmap_state may be
1890 * BKW_MMAP_EMPTY here: when done == true and
1891 * hits != rec->samples in the previous round.
1893 * evlist__toggle_bkw_mmap() ensures we never
1894 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1896 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1897 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1899 if (record__mmap_read_all(rec, false) < 0) {
1900 trigger_error(&auxtrace_snapshot_trigger);
1901 trigger_error(&switch_output_trigger);
1906 if (auxtrace_record__snapshot_started) {
1907 auxtrace_record__snapshot_started = 0;
1908 if (!trigger_is_error(&auxtrace_snapshot_trigger))
1909 record__read_auxtrace_snapshot(rec, false);
1910 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
1911 pr_err("AUX area tracing snapshot failed\n");
1917 if (trigger_is_hit(&switch_output_trigger)) {
1919 * If switch_output_trigger is hit, the data in
1920 * overwritable ring buffer should have been collected,
1921 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1923 * If SIGUSR2 raise after or during record__mmap_read_all(),
1924 * record__mmap_read_all() didn't collect data from
1925 * overwritable ring buffer. Read again.
1927 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1929 trigger_ready(&switch_output_trigger);
1932 * Re-enable events in the overwrite ring buffer after
1933 * record__mmap_read_all(): we should have collected
1934 * data from it.
1936 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1939 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1942 fd = record__switch_output(rec, false);
1944 pr_err("Failed to switch to new file\n");
1945 trigger_error(&switch_output_trigger);
1950 /* re-arm the alarm */
1951 if (rec->switch_output.time)
1952 alarm(rec->switch_output.time);
1955 if (hits == rec->samples) {
1956 if (done || draining)
1958 err = evlist__poll(rec->evlist, -1);
1960 * Propagate the error only if there is one. Ignore a positive
1961 * number of returned events and interrupt errors.
1963 if (err > 0 || (err < 0 && errno == EINTR))
1967 if (evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1971 if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) {
1973 case EVLIST_CTL_CMD_SNAPSHOT:
1974 hit_auxtrace_snapshot_trigger(rec);
1975 evlist__ctlfd_ack(rec->evlist);
1977 case EVLIST_CTL_CMD_STOP:
1980 case EVLIST_CTL_CMD_ACK:
1981 case EVLIST_CTL_CMD_UNSUPPORTED:
1982 case EVLIST_CTL_CMD_ENABLE:
1983 case EVLIST_CTL_CMD_DISABLE:
1984 case EVLIST_CTL_CMD_EVLIST:
1985 case EVLIST_CTL_CMD_PING:
1992 * When perf is starting the traced process, the events die with the
1993 * process at the end and we wait for that. Thus there is no need to
1994 * disable events in this case.
1996 if (done && !disabled && !target__none(&opts->target)) {
1997 trigger_off(&auxtrace_snapshot_trigger);
1998 evlist__disable(rec->evlist);
2003 trigger_off(&auxtrace_snapshot_trigger);
2004 trigger_off(&switch_output_trigger);
2006 if (opts->auxtrace_snapshot_on_exit)
2007 record__auxtrace_snapshot_exit(rec);
2009 if (forks && workload_exec_errno) {
2010 char msg[STRERR_BUFSIZE], strevsels[2048];
2011 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
2013 evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels);
2015 pr_err("Failed to collect '%s' for the '%s' workload: %s\n",
2016 strevsels, argv[0], emsg);
2022 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
2024 if (target__none(&rec->opts.target))
2025 record__synthesize_workload(rec, true);
2028 evlist__finalize_ctlfd(rec->evlist);
2029 record__mmap_read_all(rec, true);
2030 record__aio_mmap_read_sync(rec);
2032 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
2033 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
2034 session->header.env.comp_ratio = ratio + 0.5;
2040 if (!child_finished)
2041 kill(rec->evlist->workload.pid, SIGTERM);
2047 else if (WIFEXITED(exit_status))
2048 status = WEXITSTATUS(exit_status);
2049 else if (WIFSIGNALED(exit_status))
2050 signr = WTERMSIG(exit_status);
2054 record__synthesize(rec, true);
2055 /* this will be recalculated during process_buildids() */
2059 if (!rec->timestamp_filename) {
2060 record__finish_output(rec);
2062 fd = record__switch_output(rec, true);
2065 goto out_delete_session;
2070 perf_hooks__invoke_record_end();
2072 if (!err && !quiet) {
2074 const char *postfix = rec->timestamp_filename ?
2075 ".<timestamp>" : "";
2077 if (rec->samples && !rec->opts.full_auxtrace)
2078 scnprintf(samples, sizeof(samples),
2079 " (%" PRIu64 " samples)", rec->samples);
2083 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
2084 perf_data__size(data) / 1024.0 / 1024.0,
2085 data->path, postfix, samples);
2087 fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
2088 rec->session->bytes_transferred / 1024.0 / 1024.0,
2091 fprintf(stderr, " ]\n");
2095 #ifdef HAVE_EVENTFD_SUPPORT
2099 zstd_fini(&session->zstd_data);
2100 perf_session__delete(session);
2102 if (!opts->no_bpf_event)
2103 evlist__stop_sb_thread(rec->sb_evlist);
2107 static void callchain_debug(struct callchain_param *callchain)
2109 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
2111 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
2113 if (callchain->record_mode == CALLCHAIN_DWARF)
2114 pr_debug("callchain: stack dump size %d\n",
2115 callchain->dump_size);
2118 int record_opts__parse_callchain(struct record_opts *record,
2119 struct callchain_param *callchain,
2120 const char *arg, bool unset)
2123 callchain->enabled = !unset;
2125 /* --no-call-graph */
2127 callchain->record_mode = CALLCHAIN_NONE;
2128 pr_debug("callchain: disabled\n");
2132 ret = parse_callchain_record_opt(arg, callchain);
2134 /* Enable data address sampling for DWARF unwind. */
2135 if (callchain->record_mode == CALLCHAIN_DWARF)
2136 record->sample_address = true;
2137 callchain_debug(callchain);
2143 int record_parse_callchain_opt(const struct option *opt,
2147 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
2150 int record_callchain_opt(const struct option *opt,
2151 const char *arg __maybe_unused,
2152 int unset __maybe_unused)
2154 struct callchain_param *callchain = opt->value;
2156 callchain->enabled = true;
2158 if (callchain->record_mode == CALLCHAIN_NONE)
2159 callchain->record_mode = CALLCHAIN_FP;
2161 callchain_debug(callchain);
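/*
 * Hypothetical invocations wired to the parsers above:
 *
 *	perf record -g ...			FP call graphs via record_callchain_opt()
 *	perf record --call-graph dwarf,8192	DWARF unwind with 8 kB stack dumps;
 *						also sets record->sample_address
 *	perf record --no-call-graph		CALLCHAIN_NONE
 */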
2165 static int perf_record_config(const char *var, const char *value, void *cb)
2167 struct record *rec = cb;
2169 if (!strcmp(var, "record.build-id")) {
2170 if (!strcmp(value, "cache"))
2171 rec->no_buildid_cache = false;
2172 else if (!strcmp(value, "no-cache"))
2173 rec->no_buildid_cache = true;
2174 else if (!strcmp(value, "skip"))
2175 rec->no_buildid = true;
2176 else if (!strcmp(value, "mmap"))
2177 rec->buildid_mmap = true;
2182 if (!strcmp(var, "record.call-graph")) {
2183 var = "call-graph.record-mode";
2184 return perf_default_config(var, value, cb);
2186 #ifdef HAVE_AIO_SUPPORT
2187 if (!strcmp(var, "record.aio")) {
2188 rec->opts.nr_cblocks = strtol(value, NULL, 0);
2189 if (!rec->opts.nr_cblocks)
2190 rec->opts.nr_cblocks = nr_cblocks_default;
2198 static int record__parse_affinity(const struct option *opt, const char *str, int unset)
2200 struct record_opts *opts = (struct record_opts *)opt->value;
2205 if (!strcasecmp(str, "node"))
2206 opts->affinity = PERF_AFFINITY_NODE;
2207 else if (!strcasecmp(str, "cpu"))
2208 opts->affinity = PERF_AFFINITY_CPU;
2213 static int parse_output_max_size(const struct option *opt,
2214 const char *str, int unset)
2216 unsigned long *s = (unsigned long *)opt->value;
2217 static struct parse_tag tags_size[] = {
2218 { .tag = 'B', .mult = 1 },
2219 { .tag = 'K', .mult = 1 << 10 },
2220 { .tag = 'M', .mult = 1 << 20 },
2221 { .tag = 'G', .mult = 1 << 30 },
2231 val = parse_tag_value(str, tags_size);
2232 if (val != (unsigned long) -1) {
2240 static int record__parse_mmap_pages(const struct option *opt,
2242 int unset __maybe_unused)
2244 struct record_opts *opts = opt->value;
2246 unsigned int mmap_pages;
2261 ret = __evlist__parse_mmap_pages(&mmap_pages, s);
2264 opts->mmap_pages = mmap_pages;
2272 ret = __evlist__parse_mmap_pages(&mmap_pages, p + 1);
2276 opts->auxtrace_mmap_pages = mmap_pages;
2283 static int parse_control_option(const struct option *opt,
2285 int unset __maybe_unused)
2287 struct record_opts *opts = opt->value;
2289 return evlist__parse_control(str, &opts->ctl_fd, &opts->ctl_fd_ack, &opts->ctl_fd_close);
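/*
 * The option parsed above accepts the forms documented for perf record's
 * --control, e.g.:
 *
 *	--control fd:ctl-fd[,ack-fd]		pre-opened descriptors
 *	--control fifo:ctl-path[,ack-path]	named pipes
 *
 * Commands such as 'enable', 'disable', 'snapshot' and 'stop' written to
 * the control fd are handled by evlist__ctlfd_process() in the main loop
 * above.
 */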
2292 static void switch_output_size_warn(struct record *rec)
2294 u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
2295 struct switch_output *s = &rec->switch_output;
2299 if (s->size < wakeup_size) {
2302 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
2303 pr_warning("WARNING: switch-output data size is lower than the "
2304 "wakeup kernel buffer size (%s); "
2305 "expect bigger perf.data sizes\n", buf);
2309 static int switch_output_setup(struct record *rec)
2311 struct switch_output *s = &rec->switch_output;
2312 static struct parse_tag tags_size[] = {
2313 { .tag = 'B', .mult = 1 },
2314 { .tag = 'K', .mult = 1 << 10 },
2315 { .tag = 'M', .mult = 1 << 20 },
2316 { .tag = 'G', .mult = 1 << 30 },
2319 static struct parse_tag tags_time[] = {
2320 { .tag = 's', .mult = 1 },
2321 { .tag = 'm', .mult = 60 },
2322 { .tag = 'h', .mult = 60*60 },
2323 { .tag = 'd', .mult = 60*60*24 },
2329 * If we're using --switch-output-events, then we imply
2330 * --switch-output=signal, as we'll send a SIGUSR2 from the side band
2331 * thread to its parent.
2333 if (rec->switch_output_event_set)
2339 if (!strcmp(s->str, "signal")) {
2342 pr_debug("switch-output with SIGUSR2 signal\n");
2346 val = parse_tag_value(s->str, tags_size);
2347 if (val != (unsigned long) -1) {
2349 pr_debug("switch-output with %s size threshold\n", s->str);
2353 val = parse_tag_value(s->str, tags_time);
2354 if (val != (unsigned long) -1) {
2356 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
2364 rec->timestamp_filename = true;
2367 if (s->size && !rec->opts.no_buffering)
2368 switch_output_size_warn(rec);
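/*
 * Worked examples for the setup above:
 *
 *	--switch-output=signal	rotate output on SIGUSR2
 *	--switch-output=100M	rotate after 100 * (1 << 20) bytes written
 *	--switch-output=5m	rotate every 5 * 60 seconds
 *
 * Any of these also sets rec->timestamp_filename, so each rotated file
 * gets a timestamp suffix.
 */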
2373 static const char * const __record_usage[] = {
2374 "perf record [<options>] [<command>]",
2375 "perf record [<options>] -- <command> [<options>]",
2378 const char * const *record_usage = __record_usage;
2380 static int build_id__process_mmap(struct perf_tool *tool, union perf_event *event,
2381 struct perf_sample *sample, struct machine *machine)
2384 * We already have the kernel maps, put in place via perf_session__create_kernel_maps(),
2385 * so there is no need to add them twice.
2387 if (!(event->header.misc & PERF_RECORD_MISC_USER))
2389 return perf_event__process_mmap(tool, event, sample, machine);
2392 static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *event,
2393 struct perf_sample *sample, struct machine *machine)
2396 * We already have the kernel maps, put in place via perf_session__create_kernel_maps(),
2397 * so there is no need to add them twice.
2399 if (!(event->header.misc & PERF_RECORD_MISC_USER))
2402 return perf_event__process_mmap2(tool, event, sample, machine);
2406 * XXX Ideally this would be local to cmd_record() and passed to a record__new(),
2407 * because we need to have access to it in record__exit(), which is called
2408 * after cmd_record() exits, but since record_options needs to be accessible to
2409 * builtin-script, leave it here.
2411 * At least we don't touch it in all the other functions here directly.
2413 * Just say no to tons of global variables, sigh.
2415 static struct record record = {
2417 .sample_time = true,
2418 .mmap_pages = UINT_MAX,
2419 .user_freq = UINT_MAX,
2420 .user_interval = ULLONG_MAX,
2424 .default_per_cpu = true,
2426 .mmap_flush = MMAP_FLUSH_DEFAULT,
2427 .nr_threads_synthesize = 1,
2432 .sample = process_sample_event,
2433 .fork = perf_event__process_fork,
2434 .exit = perf_event__process_exit,
2435 .comm = perf_event__process_comm,
2436 .namespaces = perf_event__process_namespaces,
2437 .mmap = build_id__process_mmap,
2438 .mmap2 = build_id__process_mmap2,
2439 .ordered_events = true,
2443 const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
2444 "\n\t\t\t\tDefault: fp";
2446 static bool dry_run;
2449 * XXX Will stay a global variable until we fix builtin-script.c to stop messing
2450 * with it and switch to using the library functions in perf_evlist that came
2451 * from builtin-record.c, i.e. use record_opts,
2452 * evlist__prepare_workload, etc. instead of fork+exec'ing 'perf record',
2455 static struct option __record_options[] = {
2456 OPT_CALLBACK('e', "event", &record.evlist, "event",
2457 "event selector. use 'perf list' to list available events",
2458 parse_events_option),
2459 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
2460 "event filter", parse_filter),
2461 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
2462 NULL, "don't record events from perf itself",
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		   "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		   "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.data.path, "file",
		   "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
		    "synthesize non-sample events at the end of output"),
	OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
	OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
	OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
		    "Fail if the specified frequency can't be used"),
	OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
		     "profile at this frequency",
		     record__parse_freq),
	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
		     "number of mmap data pages and AUX area tracing mmap pages",
		     record__parse_mmap_pages),
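	/*
	 * Illustrative: '-m 512' sizes the data buffer in pages, '-m 512,128'
	 * additionally sizes the AUX area buffer; byte-size suffixes (B/K/M/G)
	 * are also accepted, e.g. '-m 16M'.
	 */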
	OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
		     "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
		     record__mmap_flush_parse),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
	OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
		    "Record the sample physical addresses"),
	OPT_BOOLEAN(0, "data-page-size", &record.opts.sample_data_page_size,
		    "Record the sampled data address data page size"),
	OPT_BOOLEAN(0, "code-page-size", &record.opts.sample_code_page_size,
		    "Record the sampled code address (ip) page size"),
	OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
			&record.opts.sample_time_set,
			"Record the sample timestamps"),
	OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
			"Record the sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
			&record.no_buildid_cache_set,
			"do not update the buildid cache"),
	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
			&record.no_buildid_set,
			"do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_INTEGER('D', "delay", &record.opts.initial_delay,
		    "ms to wait before starting measurement after program start (-1: start with events disabled)"),
	OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),
	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
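	/*
	 * Illustrative: '-j any_call,u' restricts the branch stack to taken
	 * calls in user space; '-b' is effectively '-j any'.
	 */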
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
			    "sample selected machine registers on interrupt,"
			    " use '-I?' to list register names", parse_intr_regs),
	OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
			    "sample selected machine registers on interrupt,"
			    " use '--user-regs=?' to list register names", parse_user_regs),
	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
		    "Record running/enabled time of read (:S) events"),
	OPT_CALLBACK('k', "clockid", &record.opts,
		     "clockid", "clockid to use for events, see clock_gettime()",
		     parse_clockid),
	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
			  "opts", "AUX area tracing Snapshot Mode", ""),
	OPT_STRING_OPTARG(0, "aux-sample", &record.opts.auxtrace_sample_opts,
			  "opts", "sample AUX area", ""),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
		    "Record namespaces events"),
	OPT_BOOLEAN(0, "all-cgroups", &record.opts.record_cgroup,
		    "Record cgroup events"),
	OPT_BOOLEAN_SET(0, "switch-events", &record.opts.record_switch_events,
			&record.opts.record_switch_events_set,
			"Record context switch events"),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
		    "collect kernel callchains"),
	OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
		    "collect user callchains"),
	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
		   "clang binary to use for compiling BPF scriptlets"),
	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
		   "options passed to clang when compiling BPF scriptlets"),
	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
		    "Record build-id of all DSOs regardless of hits"),
	OPT_BOOLEAN(0, "buildid-mmap", &record.buildid_mmap,
		    "Record build-id in map events"),
	OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
		    "append timestamp to output filename"),
	OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
		    "Record timestamp boundary (time of first/last samples)"),
	OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
			      &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
			      "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
			      "signal"),
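	/*
	 * Illustrative: '--switch-output' rotates on SIGUSR2 (the default),
	 * '--switch-output=1G' after roughly 1GB written, and
	 * '--switch-output=30s' every 30 seconds.
	 */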
	OPT_CALLBACK_SET(0, "switch-output-event", &record.sb_evlist, &record.switch_output_event_set, "switch output event",
			 "switch output event selector. use 'perf list' to list available events",
			 parse_events_option_new_evlist),
	OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
		    "Limit number of switch output generated files"),
	OPT_BOOLEAN(0, "dry-run", &dry_run,
		    "Parse options then exit"),
#ifdef HAVE_AIO_SUPPORT
	OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
			    &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
			    record__aio_parse),
#endif
	OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
		     "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
		     record__parse_affinity),
#ifdef HAVE_ZSTD_SUPPORT
	OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
			    "n", "Compressed records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
			    record__parse_comp_level),
#endif
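	/*
	 * Illustrative: '-z' compresses records at the default level 1;
	 * '-z 10' trades more CPU for a smaller perf.data.
	 */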
	OPT_CALLBACK(0, "max-size", &record.output_max_size,
		     "size", "Limit the maximum size of the output file", parse_output_max_size),
	OPT_UINTEGER(0, "num-thread-synthesize",
		     &record.opts.nr_threads_synthesize,
		     "number of threads to run for event synthesis"),
#ifdef HAVE_LIBPFM
	OPT_CALLBACK(0, "pfm-events", &record.evlist, "event",
		     "libpfm4 event selector. use 'perf list' to list available events",
		     parse_libpfm_events_option),
#endif
	OPT_CALLBACK(0, "control", &record.opts, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
		     "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events,\n"
		     "\t\t\t  'snapshot': AUX area tracing snapshot).\n"
		     "\t\t\t  Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
		     "\t\t\t  Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
		     parse_control_option),
	OPT_END()
};
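/*
 * Illustrative --control session over fifos:
 *
 *   mkfifo ctl.fifo ack.fifo
 *   perf record --control fifo:ctl.fifo,ack.fifo -a &
 *   echo disable > ctl.fifo	# pause event collection
 *   echo enable > ctl.fifo	# resume event collection
 */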
struct option *record_options = __record_options;
int cmd_record(int argc, const char **argv)
{
	int err;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

	setlocale(LC_ALL, "");

#ifndef HAVE_LIBBPF_SUPPORT
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
	set_nobuild('\0', "clang-path", true);
	set_nobuild('\0', "clang-opt", true);
# undef set_nobuild
#endif

#ifndef HAVE_BPF_PROLOGUE
# if !defined (HAVE_DWARF_SUPPORT)
#  define REASON  "NO_DWARF=1"
# elif !defined (HAVE_LIBBPF_SUPPORT)
#  define REASON  "NO_LIBBPF=1"
# else
#  define REASON  "this architecture doesn't support BPF prologue"
# endif
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
	set_nobuild('\0', "vmlinux", true);
# undef set_nobuild
# undef REASON
#endif

	rec->opts.affinity = PERF_AFFINITY_SYS;

	rec->evlist = evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	err = perf_config(perf_record_config, rec);
	if (err)
		return err;

	argc = parse_options(argc, argv, record_options, record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (quiet)
		perf_quiet_option();

	/* Make system wide (-a) the default target. */
	if (!argc && target__none(&rec->opts.target))
		rec->opts.target.system_wide = true;

	if (nr_cgroups && !rec->opts.target.system_wide) {
		usage_with_options_msg(record_usage, record_options,
			"cgroup monitoring only available in system-wide mode");
	}
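	/* Illustrative: 'perf record -a -G mygroup ...'; -G without -a trips the check above. */
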
	if (rec->buildid_mmap) {
		if (!perf_can_record_build_id()) {
			pr_err("Failed: no support to record build id in mmap events, update your kernel.\n");
			err = -EINVAL;
			goto out_opts;
		}
		pr_debug("Enabling build id in mmap2 events.\n");
		/* Enable mmap build id synthesizing. */
		symbol_conf.buildid_mmap2 = true;
		/* Enable perf_event_attr::build_id bit. */
		rec->opts.build_id = true;
		/* Disable build id cache. */
		rec->no_buildid = true;
	}

	if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
		pr_err("Kernel has no cgroup sampling support.\n");
		err = -EINVAL;
		goto out_opts;
	}

	if (rec->opts.kcore)
		rec->data.is_dir = true;

	if (rec->opts.comp_level != 0) {
		pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
		rec->no_buildid = true;
	}

	if (rec->opts.record_switch_events &&
	    !perf_can_record_switch_events()) {
		ui__error("kernel does not support recording context switch events\n");
		parse_options_usage(record_usage, record_options, "switch-events", 0);
		err = -EINVAL;
		goto out_opts;
	}

	if (switch_output_setup(rec)) {
		parse_options_usage(record_usage, record_options, "switch-output", 0);
		err = -EINVAL;
		goto out_opts;
	}

	if (rec->switch_output.time) {
		signal(SIGALRM, alarm_sig_handler);
		alarm(rec->switch_output.time);
	}
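	/*
	 * alarm(2) arms a one-shot timer; for periodic time-based rotation it
	 * is presumably re-armed once each output switch has happened.
	 */
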
	if (rec->switch_output.num_files) {
		rec->switch_output.filenames = calloc(rec->switch_output.num_files,
						      sizeof(char *));
		if (!rec->switch_output.filenames) {
			err = -EINVAL;
			goto out_opts;
		}
	}

	/*
	 * Allow aliases to facilitate the lookup of symbols for address
	 * filters. Refer to auxtrace_parse_filters().
	 */
	symbol_conf.allow_aliases = true;

	symbol__init(NULL);

	if (rec->opts.affinity != PERF_AFFINITY_SYS) {
		rec->affinity_mask.nbits = cpu__max_cpu();
		rec->affinity_mask.bits = bitmap_alloc(rec->affinity_mask.nbits);
		if (!rec->affinity_mask.bits) {
			pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits);
			err = -ENOMEM;
			goto out_opts;
		}
		pr_debug2("thread mask[%zd]: empty\n", rec->affinity_mask.nbits);
	}

	err = record__auxtrace_init(rec);
	if (err)
		goto out;

	if (dry_run)
		goto out;

	err = bpf__setup_stdout(rec->evlist);
	if (err) {
		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n",
		       errbuf);
		goto out;
	}

	err = -ENOMEM;

	if (rec->no_buildid_cache || rec->no_buildid) {
		disable_buildid_cache();
	} else if (rec->switch_output.enabled) {
		/*
		 * In 'perf record --switch-output', disable buildid
		 * generation by default to reduce data file switching
		 * overhead. Still generate buildid if they are required
		 * explicitly using:
		 *
		 * perf record --switch-output --no-no-buildid \
		 *             --no-no-buildid-cache
		 *
		 * The following code is equivalent to:
		 *
		 * if ((rec->no_buildid || !rec->no_buildid_set) &&
		 *     (rec->no_buildid_cache || !rec->no_buildid_cache_set))
		 *         disable_buildid_cache();
		 */
		bool disable = true;

		if (rec->no_buildid_set && !rec->no_buildid)
			disable = false;
		if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
			disable = false;
		if (disable) {
			rec->no_buildid = true;
			rec->no_buildid_cache = true;
			disable_buildid_cache();
		}
	}

	if (record.opts.overwrite)
		record.opts.tail_synthesize = true;

	if (rec->evlist->core.nr_entries == 0) {
		if (perf_pmu__has_hybrid()) {
			err = evlist__add_default_hybrid(rec->evlist,
							 !record.opts.no_samples);
		} else {
			err = __evlist__add_default(rec->evlist,
						    !record.opts.no_samples);
		}

		if (err < 0) {
			pr_err("Not enough memory for event selector list\n");
			goto out;
		}
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s\n", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out;
	}

	/* Enable ignoring missing threads when -u/-p option is defined. */
	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;

	err = -ENOMEM;
	if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
	if (err)
		goto out;

	/*
	 * We take all buildids when the file contains
	 * AUX area tracing data, because decoding the
	 * trace to find the hit DSOs would take too long.
	 */
	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;

	if (rec->opts.text_poke) {
		err = record__config_text_poke(rec->evlist);
		if (err) {
			pr_err("record__config_text_poke failed, error %d\n", err);
			goto out;
		}
	}

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out;
	}

	if (rec->opts.nr_cblocks > nr_cblocks_max)
		rec->opts.nr_cblocks = nr_cblocks_max;
	pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);

	pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
	pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);

	if (rec->opts.comp_level > comp_level_max)
		rec->opts.comp_level = comp_level_max;
	pr_debug("comp level: %d\n", rec->opts.comp_level);

	err = __cmd_record(&record, argc, argv);
out:
	bitmap_free(rec->affinity_mask.bits);
	evlist__delete(rec->evlist);
	symbol__exit();
	auxtrace_record__free(rec->itr);
out_opts:
	evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
	return err;
}

static void snapshot_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	hit_auxtrace_snapshot_trigger(rec);

	if (switch_output_signal(rec))
		trigger_hit(&switch_output_trigger);
}

static void alarm_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	if (switch_output_time(rec))
		trigger_hit(&switch_output_trigger);
}
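
/*
 * Both handlers defer the heavy lifting (draining buffers, rotating the
 * output file) to the main record loop; here they mostly just flip trigger
 * state, keeping the work done in signal context short.
 */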