1 // SPDX-License-Identifier: GPL-2.0
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
7 * later analysis via perf report.
13 #include "util/build-id.h"
14 #include "util/util.h"
15 #include <subcmd/parse-options.h>
16 #include "util/parse-events.h"
17 #include "util/config.h"
19 #include "util/callchain.h"
20 #include "util/cgroup.h"
21 #include "util/header.h"
22 #include "util/event.h"
23 #include "util/evlist.h"
24 #include "util/evsel.h"
25 #include "util/debug.h"
26 #include "util/drv_configs.h"
27 #include "util/session.h"
28 #include "util/tool.h"
29 #include "util/symbol.h"
30 #include "util/cpumap.h"
31 #include "util/thread_map.h"
32 #include "util/data.h"
33 #include "util/perf_regs.h"
34 #include "util/auxtrace.h"
36 #include "util/parse-branch-options.h"
37 #include "util/parse-regs-options.h"
38 #include "util/llvm-utils.h"
39 #include "util/bpf-loader.h"
40 #include "util/trigger.h"
41 #include "util/perf-hooks.h"
42 #include "util/time-utils.h"
43 #include "util/units.h"
55 #include <linux/time64.h>
/*
 * NOTE(review): this chunk is a sampled listing — interior lines (braces,
 * several fields) are missing, so both structs below are incomplete here.
 */
57 struct switch_output {
/* Fields below belong to struct record (its opening line is not visible). */
67 struct perf_tool tool;
68 struct record_opts opts;
70 struct perf_data data;
71 struct auxtrace_record *itr;
72 struct perf_evlist *evlist;
73 struct perf_session *session;
77 bool no_buildid_cache;
78 bool no_buildid_cache_set;
/* When set, the output file name carries a timestamp (see record__switch_output). */
80 bool timestamp_filename;
/* When set, walk all samples just to capture first/last sample times. */
81 bool timestamp_boundary;
82 struct switch_output switch_output;
83 unsigned long long samples;
/*
 * Coordination state between signal handlers and the main record loop:
 * the triggers gate AUX-area snapshotting and --switch-output rotation.
 * volatile because it is written from a signal handler.
 */
86 static volatile int auxtrace_record__snapshot_started;
87 static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
88 static DEFINE_TRIGGER(switch_output_trigger);
/*
 * True when --switch-output was requested in signal mode and the
 * switch-output trigger is currently armed.
 */
90 static bool switch_output_signal(struct record *rec)
92 return rec->switch_output.signal &&
93 trigger_is_ready(&switch_output_trigger);
/*
 * True when a size threshold was configured, the trigger is armed, and
 * the bytes written so far have reached that threshold.
 */
96 static bool switch_output_size(struct record *rec)
98 return rec->switch_output.size &&
99 trigger_is_ready(&switch_output_trigger) &&
100 (rec->bytes_written >= rec->switch_output.size);
/* True when a time-based switch-output was configured and the trigger is armed. */
103 static bool switch_output_time(struct record *rec)
105 return rec->switch_output.time &&
106 trigger_is_ready(&switch_output_trigger);
/*
 * Write @size bytes at @bf to the perf.data output.  On success the byte
 * count is accumulated and, if the --switch-output size threshold is now
 * crossed, the switch-output trigger is hit so the main loop rotates files.
 * NOTE(review): the error-return line is not visible in this listing.
 */
109 static int record__write(struct record *rec, void *bf, size_t size)
111 if (perf_data__write(rec->session->data, bf, size) < 0) {
112 pr_err("failed to write perf data, error: %m\n");
116 rec->bytes_written += size;
118 if (switch_output_size(rec))
119 trigger_hit(&switch_output_trigger);
/*
 * perf_tool callback: forward a synthesized event straight to the output
 * file.  Recovers the enclosing struct record from the embedded tool.
 */
124 static int process_synthesized_event(struct perf_tool *tool,
125 union perf_event *event,
126 struct perf_sample *sample __maybe_unused,
127 struct machine *machine __maybe_unused)
129 struct record *rec = container_of(tool, struct record, tool);
130 return record__write(rec, event, event->header.size);
/*
 * perf_mmap__push() callback: @to is really the struct record, so just
 * hand the buffer to record__write().
 */
133 static int record__pushfn(void *to, void *bf, size_t size)
135 struct record *rec = to;
138 return record__write(rec, bf, size);
/* Set from signal handlers; polled by the __cmd_record() main loop. */
141 static volatile int done;
142 static volatile int signr = -1;
143 static volatile int child_finished;
145 static void sig_handler(int sig)
/*
 * SIGSEGV handler: let registered perf hooks recover, then dump a stack
 * trace for the crashing signal.
 */
155 static void sigsegv_handler(int sig)
157 perf_hooks__recover();
158 sighandler_dump_stack(sig);
/*
 * atexit handler: restore the default disposition for the signal that
 * caused the exit (lines resetting/raising it are not visible here).
 */
161 static void record__sig_exit(void)
166 signal(signr, SIG_DFL);
170 #ifdef HAVE_AUXTRACE_SUPPORT
/*
 * Write one AUX-area trace chunk (header event plus up to two data
 * fragments from the ring buffer wrap) to the output.  In non-pipe mode
 * the current file offset is recorded in the session auxtrace index
 * first, so 'perf report' can seek to the data.  Data is padded to an
 * 8-byte boundary, matching __auxtrace_mmap__read()'s size accounting.
 */
172 static int record__process_auxtrace(struct perf_tool *tool,
173 union perf_event *event, void *data1,
174 size_t len1, void *data2, size_t len2)
176 struct record *rec = container_of(tool, struct record, tool);
177 struct perf_data *data = &rec->data;
181 if (!perf_data__is_pipe(data)) {
183 int fd = perf_data__fd(data);
186 file_offset = lseek(fd, 0, SEEK_CUR);
187 if (file_offset == -1)
189 err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
195 /* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
196 padding = (len1 + len2) & 7;
198 padding = 8 - padding;
200 record__write(rec, event, event->header.size);
201 record__write(rec, data1, len1);
203 record__write(rec, data2, len2);
204 record__write(rec, &pad, padding);
/* Drain one AUX-area mmap into the output via record__process_auxtrace(). */
209 static int record__auxtrace_mmap_read(struct record *rec,
210 struct auxtrace_mmap *mm)
214 ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
215 record__process_auxtrace);
/*
 * Snapshot-mode variant: read at most auxtrace_snapshot_size bytes from
 * one AUX-area mmap.
 */
225 static int record__auxtrace_mmap_read_snapshot(struct record *rec,
226 struct auxtrace_mmap *mm)
230 ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
231 record__process_auxtrace,
232 rec->opts.auxtrace_snapshot_size);
/*
 * Take a snapshot of every per-CPU AUX-area mmap; stops on the first
 * failing mmap (error path lines are not visible here).
 */
242 static int record__auxtrace_read_snapshot_all(struct record *rec)
247 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
248 struct auxtrace_mmap *mm =
249 &rec->evlist->mmap[i].auxtrace_mmap;
254 if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
/*
 * Handle a requested AUX snapshot: read all mmaps, tell the PMU driver
 * the snapshot is finished, and re-arm (or error) the snapshot trigger.
 */
263 static void record__read_auxtrace_snapshot(struct record *rec)
265 pr_debug("Recording AUX area tracing snapshot\n");
266 if (record__auxtrace_read_snapshot_all(rec) < 0) {
267 trigger_error(&auxtrace_snapshot_trigger);
269 if (auxtrace_record__snapshot_finish(rec->itr))
270 trigger_error(&auxtrace_snapshot_trigger);
272 trigger_ready(&auxtrace_snapshot_trigger);
/*
 * Set up AUX-area tracing: create the per-arch auxtrace recorder, parse
 * the --snapshot options, then parse any auxtrace filters on the evlist.
 */
276 static int record__auxtrace_init(struct record *rec)
281 rec->itr = auxtrace_record__init(rec->evlist, &err);
286 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
287 rec->opts.auxtrace_snapshot_opts);
291 return auxtrace_parse_filters(rec->evlist);
/*
 * !HAVE_AUXTRACE_SUPPORT stubs (the #else line is not visible in this
 * listing): no-op versions so callers need no conditional compilation.
 */
297 int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
298 struct auxtrace_mmap *mm __maybe_unused)
304 void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
309 int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
314 static int record__auxtrace_init(struct record *rec __maybe_unused)
/*
 * mmap the evlist ring buffers (including the AUX area ones).  On EPERM
 * print an actionable hint about perf_event_mlock_kb / -m; otherwise
 * report the raw errno.
 */
321 static int record__mmap_evlist(struct record *rec,
322 struct perf_evlist *evlist)
324 struct record_opts *opts = &rec->opts;
327 if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
328 opts->auxtrace_mmap_pages,
329 opts->auxtrace_snapshot_mode) < 0) {
330 if (errno == EPERM) {
331 pr_err("Permission error mapping pages.\n"
332 "Consider increasing "
333 "/proc/sys/kernel/perf_event_mlock_kb,\n"
334 "or try again with a smaller value of -m/--mmap_pages.\n"
335 "(current value: %u,%u)\n",
336 opts->mmap_pages, opts->auxtrace_mmap_pages);
339 pr_err("failed to mmap with %d (%s)\n", errno,
340 str_error_r(errno, msg, sizeof(msg)));
/* Convenience wrapper: mmap the record's own evlist. */
350 static int record__mmap(struct record *rec)
352 return record__mmap_evlist(rec, rec->evlist);
/*
 * Open all events on the evlist, apply tracepoint filters and driver
 * configs, mmap the buffers and attach the evlist to the session.
 * With --delay a dummy event is added first so PERF_RECORD_MMAP is
 * tracked while the real events wait to be enabled.
 */
355 static int record__open(struct record *rec)
358 struct perf_evsel *pos;
359 struct perf_evlist *evlist = rec->evlist;
360 struct perf_session *session = rec->session;
361 struct record_opts *opts = &rec->opts;
362 struct perf_evsel_config_term *err_term;
366 * For initial_delay we need to add a dummy event so that we can track
367 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
368 * real events, the ones asked by the user.
370 if (opts->initial_delay) {
371 if (perf_evlist__add_dummy(evlist))
374 pos = perf_evlist__first(evlist);
376 pos = perf_evlist__last(evlist);
378 pos->attr.enable_on_exec = 1;
381 perf_evlist__config(evlist, opts, &callchain_param);
/* Open each event; on failure, try a fallback config before giving up. */
383 evlist__for_each_entry(evlist, pos) {
385 if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
386 if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
388 ui__warning("%s\n", msg);
393 perf_evsel__open_strerror(pos, &opts->target,
394 errno, msg, sizeof(msg));
395 ui__error("%s\n", msg);
399 pos->supported = true;
402 if (perf_evlist__apply_filters(evlist, &pos)) {
403 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
404 pos->filter, perf_evsel__name(pos), errno,
405 str_error_r(errno, msg, sizeof(msg)));
410 if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
411 pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
412 err_term->val.drv_cfg, perf_evsel__name(pos), errno,
413 str_error_r(errno, msg, sizeof(msg)));
418 rc = record__mmap(rec);
422 session->evlist = evlist;
423 perf_session__set_id_hdr_size(session);
/*
 * Sample callback used while post-processing the written data: track the
 * first/last sample timestamps and (unless --buildid-all) mark the DSO
 * hit so only referenced build-ids are emitted.
 */
428 static int process_sample_event(struct perf_tool *tool,
429 union perf_event *event,
430 struct perf_sample *sample,
431 struct perf_evsel *evsel,
432 struct machine *machine)
434 struct record *rec = container_of(tool, struct record, tool);
436 if (rec->evlist->first_sample_time == 0)
437 rec->evlist->first_sample_time = sample->time;
439 rec->evlist->last_sample_time = sample->time;
441 if (rec->buildid_all)
445 return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
/*
 * Re-read the just-recorded data to collect build-ids for the header.
 * Prefers the real vmlinux path over the build-id cache path, and skips
 * per-sample processing entirely when --buildid-all is given (unless
 * timestamp_boundary still needs first/last sample times).
 */
448 static int process_buildids(struct record *rec)
450 struct perf_data *data = &rec->data;
451 struct perf_session *session = rec->session;
457 * During this process, it'll load kernel map and replace the
458 * dso->long_name to a real pathname it found. In this case
459 * we prefer the vmlinux path like
460 * /lib/modules/3.16.4/build/vmlinux
462 * rather than build-id path (in debug directory).
463 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
465 symbol_conf.ignore_vmlinux_buildid = true;
468 * If --buildid-all is given, it marks all DSO regardless of hits,
469 * so no need to process samples. But if timestamp_boundary is enabled,
470 * it still needs to walk on all samples to get the timestamps of
471 * first/last samples.
473 if (rec->buildid_all && !rec->timestamp_boundary)
474 rec->tool.sample = NULL;
476 return perf_session__process_events(session);
/*
 * machines__process_guests() callback: synthesize module mmap events and
 * the guest kernel mmap for one guest machine, so guest-side symbols
 * resolve even when the first sampled address lands in a module.
 */
479 static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
482 struct perf_tool *tool = data;
484 *As for guest kernel when processing subcommand record&report,
485 *we arrange module mmap prior to guest kernel mmap and trigger
486 *a preload dso because default guest module symbols are loaded
487 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
488 *method is used to avoid symbol missing when the first addr is
489 *in module instead of in guest kernel.
491 err = perf_event__synthesize_modules(tool, process_synthesized_event,
494 pr_err("Couldn't record guest kernel [%d]'s reference"
495 " relocation symbol.\n", machine->pid);
498 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
499 * have no _text sometimes.
501 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
504 pr_err("Couldn't record guest kernel [%d]'s reference"
505 " relocation symbol.\n", machine->pid);
/* Header-only event marking the end of one round of ring-buffer flushing. */
508 static struct perf_event_header finished_round_event = {
509 .size = sizeof(struct perf_event_header),
510 .type = PERF_RECORD_FINISHED_ROUND,
/*
 * Flush every (normal or overwrite) ring buffer of the evlist to the
 * output, draining AUX areas too when not in snapshot mode.  If anything
 * was written, append a FINISHED_ROUND marker; in overwrite mode, flip
 * the backward mmaps back to EMPTY afterwards.
 */
513 static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
516 u64 bytes_written = rec->bytes_written;
519 struct perf_mmap *maps;
524 maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
/* Only drain overwrite buffers when data collection is actually pending. */
528 if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
531 for (i = 0; i < evlist->nr_mmaps; i++) {
532 struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;
535 if (perf_mmap__push(&maps[i], rec, record__pushfn) != 0) {
541 if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
542 record__auxtrace_mmap_read(rec, mm) != 0) {
549 * Mark the round finished in case we wrote
550 * at least one event.
552 if (bytes_written != rec->bytes_written)
553 rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
556 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
/* Flush the normal buffers first, then the overwrite (backward) buffers. */
561 static int record__mmap_read_all(struct record *rec)
565 err = record__mmap_read_evlist(rec, rec->evlist, false);
569 return record__mmap_read_evlist(rec, rec->evlist, true);
/*
 * Enable every perf.data header feature, then clear the ones this run
 * cannot or should not provide (build-ids, tracing data, branch stack,
 * auxtrace, stat).
 */
572 static void record__init_features(struct record *rec)
574 struct perf_session *session = rec->session;
577 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
578 perf_header__set_feat(&session->header, feat);
581 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
583 if (!have_tracepoints(&rec->evlist->entries))
584 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
586 if (!rec->opts.branch_stack)
587 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
589 if (!rec->opts.full_auxtrace)
590 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
592 perf_header__clear_feat(&session->header, HEADER_STAT);
/*
 * Finalize the output file: fix up data_size, optionally collect
 * build-ids (all DSOs with --buildid-all), and rewrite the header.
 * NOTE(review): the return type line is above this listing's view.
 */
596 record__finish_output(struct record *rec)
598 struct perf_data *data = &rec->data;
599 int fd = perf_data__fd(data);
604 rec->session->header.data_size += rec->bytes_written;
605 data->size = lseek(perf_data__fd(data), 0, SEEK_CUR);
607 if (!rec->no_buildid) {
608 process_buildids(rec);
610 if (rec->buildid_all)
611 dsos__hit_all(rec->session);
613 perf_session__write_header(rec->session, rec->evlist, fd, true);
/*
 * Synthesize COMM/MMAP events for the forked workload thread, either at
 * start or (with --tail-synthesize) at the end of the run; @tail selects
 * which phase this call belongs to.
 */
618 static int record__synthesize_workload(struct record *rec, bool tail)
621 struct thread_map *thread_map;
623 if (rec->opts.tail_synthesize != tail)
626 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
627 if (thread_map == NULL)
630 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
631 process_synthesized_event,
632 &rec->session->machines.host,
633 rec->opts.sample_address,
634 rec->opts.proc_map_timeout);
635 thread_map__put(thread_map);
639 static int record__synthesize(struct record *rec, bool tail);
/*
 * Rotate the output file (--switch-output): finish the current file,
 * rename it with a timestamp, and (unless called at exit) reset counters
 * and re-synthesize tracking events into the fresh file.  Returns the
 * new output fd (negative on error).
 */
642 record__switch_output(struct record *rec, bool at_exit)
644 struct perf_data *data = &rec->data;
647 /* Same Size: "2015122520103046"*/
648 char timestamp[] = "InvalidTimestamp";
650 record__synthesize(rec, true);
651 if (target__none(&rec->opts.target))
652 record__synthesize_workload(rec, true);
655 record__finish_output(rec);
656 err = fetch_current_timestamp(timestamp, sizeof(timestamp));
658 pr_err("Failed to get current timestamp\n");
662 fd = perf_data__switch(data, timestamp,
663 rec->session->header.data_offset,
665 if (fd >= 0 && !at_exit) {
666 rec->bytes_written = 0;
667 rec->session->header.data_size = 0;
671 fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
672 data->file.path, timestamp);
674 /* Output tracking events */
676 record__synthesize(rec, false);
679 * In 'perf record --switch-output' without -a,
680 * record__synthesize() in record__switch_output() won't
681 * generate tracking events because there's no thread_map
682 * in evlist. Which causes newly created perf.data doesn't
683 * contain map and comm information.
684 * Create a fake thread_map and directly call
685 * perf_event__synthesize_thread_map() for those events.
687 if (target__none(&rec->opts.target))
688 record__synthesize_workload(rec, false);
/* errno from a failed workload exec, delivered via SIGUSR1's sigval. */
693 static volatile int workload_exec_errno;
696 * perf_evlist__prepare_workload will send a SIGUSR1
697 * if the fork fails, since we asked by setting its
698 * want_signal to true.
700 static void workload_exec_failed_signal(int signo __maybe_unused,
702 void *ucontext __maybe_unused)
704 workload_exec_errno = info->si_value.sival_int;
709 static void snapshot_sig_handler(int sig);
710 static void alarm_sig_handler(int sig);
/*
 * Default (presumably weak — the attribute line is not visible here)
 * no-op implementation; arch code overrides it to emit TIME_CONV events.
 */
713 perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
714 struct perf_tool *tool __maybe_unused,
715 perf_event__handler_t process __maybe_unused,
716 struct machine *machine __maybe_unused)
/*
 * Pick any mapped perf_event_mmap_page from the evlist (normal first,
 * then overwrite) for reading clock conversion parameters.
 */
721 static const struct perf_event_mmap_page *
722 perf_evlist__pick_pc(struct perf_evlist *evlist)
725 if (evlist->mmap && evlist->mmap[0].base)
726 return evlist->mmap[0].base;
727 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
728 return evlist->overwrite_mmap[0].base;
/* Wrapper around perf_evlist__pick_pc() for the record's own evlist. */
733 static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
735 const struct perf_event_mmap_page *pc;
737 pc = perf_evlist__pick_pc(rec->evlist);
/*
 * Synthesize all the non-sample metadata events the report side needs:
 * attrs, features, tracing data, time-conv, auxtrace info, kernel and
 * module mmaps, guest OS info, extra attrs, thread/cpu maps and the
 * existing threads of the target.  @tail gates whether this runs at
 * start or end of the session (--tail-synthesize).
 */
743 static int record__synthesize(struct record *rec, bool tail)
745 struct perf_session *session = rec->session;
746 struct machine *machine = &session->machines.host;
747 struct perf_data *data = &rec->data;
748 struct record_opts *opts = &rec->opts;
749 struct perf_tool *tool = &rec->tool;
750 int fd = perf_data__fd(data);
753 if (rec->opts.tail_synthesize != tail)
758 * We need to synthesize events first, because some
759 * features works on top of them (on report side).
761 err = perf_event__synthesize_attrs(tool, session,
762 process_synthesized_event);
764 pr_err("Couldn't synthesize attrs.\n");
768 err = perf_event__synthesize_features(tool, session, rec->evlist,
769 process_synthesized_event);
771 pr_err("Couldn't synthesize features.\n");
775 if (have_tracepoints(&rec->evlist->entries)) {
777 * FIXME err <= 0 here actually means that
778 * there were no tracepoints so its not really
779 * an error, just that we don't need to
780 * synthesize anything. We really have to
781 * return this more properly and also
782 * propagate errors that now are calling die()
784 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
785 process_synthesized_event);
787 pr_err("Couldn't record tracing data.\n");
/* Tracing data is written directly to fd; account its size ourselves. */
790 rec->bytes_written += err;
794 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
795 process_synthesized_event, machine);
799 if (rec->opts.full_auxtrace) {
800 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
801 session, process_synthesized_event);
806 if (!perf_evlist__exclude_kernel(rec->evlist)) {
807 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
809 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
810 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
811 "Check /proc/kallsyms permission or run as root.\n");
813 err = perf_event__synthesize_modules(tool, process_synthesized_event,
815 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
816 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
817 "Check /proc/modules permission or run as root.\n");
821 machines__process_guests(&session->machines,
822 perf_event__synthesize_guest_os, tool);
825 err = perf_event__synthesize_extra_attr(&rec->tool,
827 process_synthesized_event,
832 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
833 process_synthesized_event,
836 pr_err("Couldn't synthesize thread map.\n");
840 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
841 process_synthesized_event, NULL);
843 pr_err("Couldn't synthesize cpu map.\n");
847 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
848 process_synthesized_event, opts->sample_address,
849 opts->proc_map_timeout, 1);
/*
 * The main record driver: set up signals and the session, fork the
 * workload, open/mmap events, write the header, synthesize metadata,
 * then loop draining ring buffers until done/drained, handling AUX
 * snapshots and --switch-output rotation along the way; finally reap
 * the workload and finish the output file.
 */
854 static int __cmd_record(struct record *rec, int argc, const char **argv)
858 unsigned long waking = 0;
859 const bool forks = argc > 0;
860 struct perf_tool *tool = &rec->tool;
861 struct record_opts *opts = &rec->opts;
862 struct perf_data *data = &rec->data;
863 struct perf_session *session;
864 bool disabled = false, draining = false;
/* Install exit/signal handlers before anything that can fail. */
867 atexit(record__sig_exit);
868 signal(SIGCHLD, sig_handler);
869 signal(SIGINT, sig_handler);
870 signal(SIGTERM, sig_handler);
871 signal(SIGSEGV, sigsegv_handler);
873 if (rec->opts.record_namespaces)
874 tool->namespace_events = true;
/* SIGUSR2 drives AUX snapshots and/or switch-output; otherwise ignore it. */
876 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
877 signal(SIGUSR2, snapshot_sig_handler);
878 if (rec->opts.auxtrace_snapshot_mode)
879 trigger_on(&auxtrace_snapshot_trigger);
880 if (rec->switch_output.enabled)
881 trigger_on(&switch_output_trigger);
883 signal(SIGUSR2, SIG_IGN);
886 session = perf_session__new(data, false, tool);
887 if (session == NULL) {
888 pr_err("Perf session creation failed.\n");
892 fd = perf_data__fd(data);
893 rec->session = session;
895 record__init_features(rec);
898 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
900 workload_exec_failed_signal);
902 pr_err("Couldn't run the workload!\n");
904 goto out_delete_session;
909 * If we have just single event and are sending data
910 * through pipe, we need to force the ids allocation,
911 * because we synthesize event name through the pipe
912 * and need the id for that.
914 if (data->is_pipe && rec->evlist->nr_entries == 1)
915 rec->opts.sample_id = true;
917 if (record__open(rec) != 0) {
922 err = bpf__apply_obj_config();
926 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
927 pr_err("ERROR: Apply config to BPF failed: %s\n",
933 * Normally perf_session__new would do this, but it doesn't have the
936 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
937 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n")
938 rec->tool.ordered_events = false;
941 if (!rec->evlist->nr_groups)
942 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
/* Pipe output gets a pipe header; file output a regular header. */
945 err = perf_header__write_pipe(fd);
949 err = perf_session__write_header(session, rec->evlist, fd, false);
955 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
956 pr_err("Couldn't generate buildids. "
957 "Use --no-buildid to profile anyway.\n");
962 err = record__synthesize(rec, false);
966 if (rec->realtime_prio) {
967 struct sched_param param;
969 param.sched_priority = rec->realtime_prio;
/* NOTE(review): "¶m" below is mojibake for "&param" — fix in the real source. */
970 if (sched_setscheduler(0, SCHED_FIFO, ¶m)) {
971 pr_err("Could not set realtime priority.\n");
978 * When perf is starting the traced process, all the events
979 * (apart from group members) have enable_on_exec=1 set,
980 * so don't spoil it by prematurely enabling them.
982 if (!target__none(&opts->target) && !opts->initial_delay)
983 perf_evlist__enable(rec->evlist);
989 struct machine *machine = &session->machines.host;
990 union perf_event *event;
993 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1000 * Some H/W events are generated before COMM event
1001 * which is emitted during exec(), so perf script
1002 * cannot see a correct process name for those events.
1003 * Synthesize COMM event to prevent it.
1005 tgid = perf_event__synthesize_comm(tool, event,
1006 rec->evlist->workload.pid,
1007 process_synthesized_event,
1014 event = malloc(sizeof(event->namespaces) +
1015 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1016 machine->id_hdr_size);
1017 if (event == NULL) {
1023 * Synthesize NAMESPACES event for the command specified.
1025 perf_event__synthesize_namespaces(tool, event,
1026 rec->evlist->workload.pid,
1027 tgid, process_synthesized_event,
1031 perf_evlist__start_workload(rec->evlist);
1034 if (opts->initial_delay) {
1035 usleep(opts->initial_delay * USEC_PER_MSEC);
1036 perf_evlist__enable(rec->evlist);
1039 trigger_ready(&auxtrace_snapshot_trigger);
1040 trigger_ready(&switch_output_trigger);
1041 perf_hooks__invoke_record_start();
/* Main drain loop (the for(;;) line itself is not visible in this listing). */
1043 unsigned long long hits = rec->samples;
1046 * rec->evlist->bkw_mmap_state is possible to be
1047 * BKW_MMAP_EMPTY here: when done == true and
1048 * hits != rec->samples in previous round.
1050 * perf_evlist__toggle_bkw_mmap ensure we never
1051 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1053 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1054 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1056 if (record__mmap_read_all(rec) < 0) {
1057 trigger_error(&auxtrace_snapshot_trigger);
1058 trigger_error(&switch_output_trigger);
1063 if (auxtrace_record__snapshot_started) {
1064 auxtrace_record__snapshot_started = 0;
1065 if (!trigger_is_error(&auxtrace_snapshot_trigger))
1066 record__read_auxtrace_snapshot(rec);
1067 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
1068 pr_err("AUX area tracing snapshot failed\n");
1074 if (trigger_is_hit(&switch_output_trigger)) {
1076 * If switch_output_trigger is hit, the data in
1077 * overwritable ring buffer should have been collected,
1078 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1080 * If SIGUSR2 raise after or during record__mmap_read_all(),
1081 * record__mmap_read_all() didn't collect data from
1082 * overwritable ring buffer. Read again.
1084 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1086 trigger_ready(&switch_output_trigger);
1089 * Reenable events in overwrite ring buffer after
1090 * record__mmap_read_all(): we should have collected
1093 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1096 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1099 fd = record__switch_output(rec, false);
1101 pr_err("Failed to switch to new file\n");
1102 trigger_error(&switch_output_trigger);
1107 /* re-arm the alarm */
1108 if (rec->switch_output.time)
1109 alarm(rec->switch_output.time);
/* No new samples this round: either finish or block in poll(). */
1112 if (hits == rec->samples) {
1113 if (done || draining)
1115 err = perf_evlist__poll(rec->evlist, -1);
1117 * Propagate error, only if there's any. Ignore positive
1118 * number of returned events and interrupt error.
1120 if (err > 0 || (err < 0 && errno == EINTR))
1124 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1129 * When perf is starting the traced process, at the end events
1130 * die with the process and we wait for that. Thus no need to
1131 * disable events in this case.
1133 if (done && !disabled && !target__none(&opts->target)) {
1134 trigger_off(&auxtrace_snapshot_trigger);
1135 perf_evlist__disable(rec->evlist);
1139 trigger_off(&auxtrace_snapshot_trigger);
1140 trigger_off(&switch_output_trigger);
1142 if (forks && workload_exec_errno) {
1143 char msg[STRERR_BUFSIZE];
1144 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
1145 pr_err("Workload failed: %s\n", emsg);
1151 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
1153 if (target__none(&rec->opts.target))
1154 record__synthesize_workload(rec, true);
/* Reap the forked workload and convert its status into our exit code. */
1160 if (!child_finished)
1161 kill(rec->evlist->workload.pid, SIGTERM);
1167 else if (WIFEXITED(exit_status))
1168 status = WEXITSTATUS(exit_status);
1169 else if (WIFSIGNALED(exit_status))
1170 signr = WTERMSIG(exit_status);
1174 record__synthesize(rec, true);
1175 /* this will be recalculated during process_buildids() */
1179 if (!rec->timestamp_filename) {
1180 record__finish_output(rec);
1182 fd = record__switch_output(rec, true);
1185 goto out_delete_session;
1190 perf_hooks__invoke_record_end();
1192 if (!err && !quiet) {
1194 const char *postfix = rec->timestamp_filename ?
1195 ".<timestamp>" : "";
1197 if (rec->samples && !rec->opts.full_auxtrace)
1198 scnprintf(samples, sizeof(samples),
1199 " (%" PRIu64 " samples)", rec->samples);
1203 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
1204 perf_data__size(data) / 1024.0 / 1024.0,
1205 data->file.path, postfix, samples);
1209 perf_session__delete(session);
/* Debug-print the selected callchain mode (and stack dump size for DWARF). */
1213 static void callchain_debug(struct callchain_param *callchain)
1215 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
1217 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
1219 if (callchain->record_mode == CALLCHAIN_DWARF)
1220 pr_debug("callchain: stack dump size %d\n",
1221 callchain->dump_size);
/*
 * Parse a --call-graph argument into @callchain; --no-call-graph (@unset)
 * disables it.  DWARF unwinding additionally forces address sampling.
 */
1224 int record_opts__parse_callchain(struct record_opts *record,
1225 struct callchain_param *callchain,
1226 const char *arg, bool unset)
1229 callchain->enabled = !unset;
1231 /* --no-call-graph */
1233 callchain->record_mode = CALLCHAIN_NONE;
1234 pr_debug("callchain: disabled\n");
1238 ret = parse_callchain_record_opt(arg, callchain);
1240 /* Enable data address sampling for DWARF unwind. */
1241 if (callchain->record_mode == CALLCHAIN_DWARF)
1242 record->sample_address = true;
1243 callchain_debug(callchain);
/* parse-options shim: route --call-graph through record_opts__parse_callchain(). */
1249 int record_parse_callchain_opt(const struct option *opt,
1253 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
/*
 * Handler for the bare -g flag: enable callchains, defaulting to frame
 * pointers when no mode was chosen yet.
 */
1256 int record_callchain_opt(const struct option *opt,
1257 const char *arg __maybe_unused,
1258 int unset __maybe_unused)
1260 struct callchain_param *callchain = opt->value;
1262 callchain->enabled = true;
1264 if (callchain->record_mode == CALLCHAIN_NONE)
1265 callchain->record_mode = CALLCHAIN_FP;
1267 callchain_debug(callchain);
/*
 * perfconfig callback: honour record.build-id (cache/no-cache/skip) and
 * forward record.call-graph to the generic call-graph.record-mode parser.
 */
1271 static int perf_record_config(const char *var, const char *value, void *cb)
1273 struct record *rec = cb;
1275 if (!strcmp(var, "record.build-id")) {
1276 if (!strcmp(value, "cache"))
1277 rec->no_buildid_cache = false;
1278 else if (!strcmp(value, "no-cache"))
1279 rec->no_buildid_cache = true;
1280 else if (!strcmp(value, "skip"))
1281 rec->no_buildid = true;
1286 if (!strcmp(var, "record.call-graph")) {
1287 var = "call-graph.record-mode";
1288 return perf_default_config(var, value, cb);
/* Maps a user-facing clock name to its clockid_t for --clockid. */
1294 struct clockid_map {
1299 #define CLOCKID_MAP(n, c) \
1300 { .name = n, .clockid = (c), }
1302 #define CLOCKID_END { .name = NULL, }
/* Fallback definitions for older libc headers that lack these clockids. */
1306 * Add the missing ones, we need to build on many distros...
1308 #ifndef CLOCK_MONOTONIC_RAW
1309 #define CLOCK_MONOTONIC_RAW 4
1311 #ifndef CLOCK_BOOTTIME
1312 #define CLOCK_BOOTTIME 7
1315 #define CLOCK_TAI 11
/* Name table consumed by parse_clockid(); includes short aliases. */
1318 static const struct clockid_map clockids[] = {
1319 /* available for all events, NMI safe */
1320 CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
1321 CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
1323 /* available for some events */
1324 CLOCKID_MAP("realtime", CLOCK_REALTIME),
1325 CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
1326 CLOCKID_MAP("tai", CLOCK_TAI),
1328 /* available for the lazy */
1329 CLOCKID_MAP("mono", CLOCK_MONOTONIC),
1330 CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
1331 CLOCKID_MAP("real", CLOCK_REALTIME),
1332 CLOCKID_MAP("boot", CLOCK_BOOTTIME),
/*
 * --clockid handler: accept a raw number, an optional CLOCK_ prefix, or
 * any name/alias from clockids[].  Unknown names warn and clear
 * use_clockid (error-return lines are not visible in this listing).
 */
1337 static int parse_clockid(const struct option *opt, const char *str, int unset)
1339 struct record_opts *opts = (struct record_opts *)opt->value;
1340 const struct clockid_map *cm;
1341 const char *ostr = str;
1344 opts->use_clockid = 0;
1352 /* no setting it twice */
1353 if (opts->use_clockid)
1356 opts->use_clockid = true;
1358 /* if its a number, we're done */
1359 if (sscanf(str, "%d", &opts->clockid) == 1)
1362 /* allow a "CLOCK_" prefix to the name */
1363 if (!strncasecmp(str, "CLOCK_", 6))
1366 for (cm = clockids; cm->name; cm++) {
1367 if (!strcasecmp(str, cm->name)) {
1368 opts->clockid = cm->clockid;
1373 opts->use_clockid = false;
1374 ui__warning("unknown clockid %s, check man page\n", ostr);
/*
 * -m handler: parse "pages[,aux_pages]" — the part before a comma sets
 * mmap_pages, the part after sets auxtrace_mmap_pages (the splitting
 * lines are not visible in this listing).
 */
1378 static int record__parse_mmap_pages(const struct option *opt,
1380 int unset __maybe_unused)
1382 struct record_opts *opts = opt->value;
1384 unsigned int mmap_pages;
1399 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1402 opts->mmap_pages = mmap_pages;
1410 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1414 opts->auxtrace_mmap_pages = mmap_pages;
/*
 * Warn when the --switch-output size threshold is smaller than the
 * kernel wakeup buffer size: files will overshoot the requested size.
 */
1421 static void switch_output_size_warn(struct record *rec)
1423 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1424 struct switch_output *s = &rec->switch_output;
1428 if (s->size < wakeup_size) {
1431 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1432 pr_warning("WARNING: switch-output data size lower than "
1433 "wakeup kernel buffer size (%s) "
1434 "expect bigger perf.data sizes\n", buf);
/*
 * Parse the --switch-output argument: "signal", a size with B/K/M/G
 * suffix, or a time with s/m/h/d suffix.  Any mode implies timestamped
 * file names; a size threshold with buffering on gets a size warning.
 */
1438 static int switch_output_setup(struct record *rec)
1440 struct switch_output *s = &rec->switch_output;
1441 static struct parse_tag tags_size[] = {
1442 { .tag = 'B', .mult = 1 },
1443 { .tag = 'K', .mult = 1 << 10 },
1444 { .tag = 'M', .mult = 1 << 20 },
1445 { .tag = 'G', .mult = 1 << 30 },
1448 static struct parse_tag tags_time[] = {
1449 { .tag = 's', .mult = 1 },
1450 { .tag = 'm', .mult = 60 },
1451 { .tag = 'h', .mult = 60*60 },
1452 { .tag = 'd', .mult = 60*60*24 },
1460 if (!strcmp(s->str, "signal")) {
1462 pr_debug("switch-output with SIGUSR2 signal\n");
1466 val = parse_tag_value(s->str, tags_size);
1467 if (val != (unsigned long) -1) {
1469 pr_debug("switch-output with %s size threshold\n", s->str);
1473 val = parse_tag_value(s->str, tags_time);
1474 if (val != (unsigned long) -1) {
1476 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
1484 rec->timestamp_filename = true;
1487 if (s->size && !rec->opts.no_buffering)
1488 switch_output_size_warn(rec);
/* Usage strings shown by parse-options; exported via record_usage. */
1493 static const char * const __record_usage[] = {
1494 "perf record [<options>] [<command>]",
1495 "perf record [<options>] -- <command> [<options>]",
1498 const char * const *record_usage = __record_usage;
1501 * XXX Ideally would be local to cmd_record() and passed to a record__new
1502 * because we need to have access to it in record__exit, that is called
1503 * after cmd_record() exits, but since record_options need to be accessible to
1504 * builtin-script, leave it here.
1506 * At least we don't ouch it in all the other functions here directly.
1508 * Just say no to tons of global variables, sigh.
/*
 * Global default state for 'perf record'. Global (not local to
 * cmd_record) because builtin-script and the atexit/signal paths need
 * access to it -- see the XXX comment above.
 *
 * NOTE(review): initializer lines are elided in this extract; only the
 * visible designators are annotated.
 */
1510 static struct record record = {
/* Sample timestamps on by default (user can override via -T/--timestamp). */
1512 .sample_time = true,
/* UINT_MAX/ULLONG_MAX act as "unset" sentinels, resolved later from config/defaults. */
1513 .mmap_pages = UINT_MAX,
1514 .user_freq = UINT_MAX,
1515 .user_interval = ULLONG_MAX,
1519 .default_per_cpu = true,
/* /proc/<pid>/maps processing timeout, in milliseconds. */
1521 .proc_map_timeout = 500,
/* perf_tool callbacks used when re-reading the file (e.g. build-id pass). */
1524 .sample = process_sample_event,
1525 .fork = perf_event__process_fork,
1526 .exit = perf_event__process_exit,
1527 .comm = perf_event__process_comm,
1528 .namespaces = perf_event__process_namespaces,
1529 .mmap = perf_event__process_mmap,
1530 .mmap2 = perf_event__process_mmap2,
/* Process events in timestamp order. */
1531 .ordered_events = true,
/* Help text for --call-graph; shared with other builtins (hence non-static). */
1535 const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
1536 "\n\t\t\t\tDefault: fp";
/* Set by --dry-run: parse options, then exit without recording. */
1538 static bool dry_run;
1541 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1542 * with it and switch to use the library functions in perf_evlist that came
1543 * from builtin-record.c, i.e. use record_opts,
1544 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
/*
 * Command-line option table for 'perf record'. Most options write
 * straight into the global 'record' instance above; the table is
 * exported (see record_options below) so builtin-script can reuse it.
 *
 * NOTE(review): some continuation lines are elided in this extract
 * (e.g. a few callback arguments and help strings).
 */
1547 static struct option __record_options[] = {
/* Event selection and filtering. */
1548 OPT_CALLBACK('e', "event", &record.evlist, "event",
1549 "event selector. use 'perf list' to list available events",
1550 parse_events_option),
1551 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
1552 "event filter", parse_filter),
1553 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
1554 NULL, "don't record events from perf itself",
/* Target selection: existing pid/tid, all CPUs, or a CPU list. */
1556 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
1557 "record events on existing process id"),
1558 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
1559 "record events on existing thread id"),
1560 OPT_INTEGER('r', "realtime", &record.realtime_prio,
1561 "collect data with this RT SCHED_FIFO priority"),
1562 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
1563 "collect data without buffering"),
1564 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
1565 "collect raw sample records from all opened counters"),
1566 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
1567 "system-wide collection from all CPUs"),
1568 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
1569 "list of cpus to monitor"),
1570 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
1571 OPT_STRING('o', "output", &record.data.file.path, "file",
1572 "output file name"),
1573 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
1574 &record.opts.no_inherit_set,
1575 "child tasks do not inherit counters"),
1576 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
1577 "synthesize non-sample events at the end of output"),
1578 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
1579 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
1580 "Fail if the specified frequency can't be used"),
/* Sampling rate/buffer sizing. */
1581 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
1582 "profile at this frequency",
1583 record__parse_freq),
1584 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
1585 "number of mmap data pages and AUX area tracing mmap pages",
1586 record__parse_mmap_pages),
1587 OPT_BOOLEAN(0, "group", &record.opts.group,
1588 "put the counters into a counter group"),
/* Call-graph recording: bare -g enables it; --call-graph selects the mode. */
1589 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
1590 NULL, "enables call-graph recording" ,
1591 &record_callchain_opt),
1592 OPT_CALLBACK(0, "call-graph", &record.opts,
1593 "record_mode[,record_size]", record_callchain_help,
1594 &record_parse_callchain_opt),
1595 OPT_INCR('v', "verbose", &verbose,
1596 "be more verbose (show counter open errors, etc)"),
1597 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
1598 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
1599 "per thread counts"),
/* Which fields to record in each sample. */
1600 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
1601 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
1602 "Record the sample physical addresses"),
1603 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
1604 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
1605 &record.opts.sample_time_set,
1606 "Record the sample timestamps"),
1607 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
1608 "Record the sample period"),
1609 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
/* Build-id handling (the *_SET variants also track explicit user intent). */
1611 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
1612 &record.no_buildid_cache_set,
1613 "do not update the buildid cache"),
1614 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
1615 &record.no_buildid_set,
1616 "do not collect buildids in perf.data"),
1617 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
1618 "monitor event in cgroup name only",
1620 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
1621 "ms to wait before starting measurement after program start"),
1622 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
/* Branch-stack sampling. */
1625 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
1626 "branch any", "sample any taken branches",
1627 parse_branch_stack),
1629 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
1630 "branch filter mask", "branch stack filter modes",
1631 parse_branch_stack),
1632 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
1633 "sample by weight (on special events only)"),
1634 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
1635 "sample transaction flags (special events only)"),
1636 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
1637 "use per-thread mmaps"),
/* Register sampling on interrupt / user level. */
1638 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
1639 "sample selected machine registers on interrupt,"
1640 " use -I ? to list register names", parse_regs),
1641 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
1642 "sample selected machine registers on interrupt,"
1643 " use -I ? to list register names", parse_regs),
1644 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
1645 "Record running/enabled time of read (:S) events"),
1646 OPT_CALLBACK('k', "clockid", &record.opts,
1647 "clockid", "clockid to use for events, see clock_gettime()",
/* AUX area (e.g. Intel PT) tracing controls. */
1649 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
1650 "opts", "AUX area tracing Snapshot Mode", ""),
1651 OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
1652 "per thread proc mmap processing timeout in ms"),
1653 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
1654 "Record namespaces events"),
1655 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
1656 "Record context switch events"),
1657 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
1658 "Configure all used events to run in kernel space.",
1659 PARSE_OPT_EXCLUSIVE),
1660 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
1661 "Configure all used events to run in user space.",
1662 PARSE_OPT_EXCLUSIVE),
/* BPF scriptlet compilation knobs (disabled at runtime without libbpf). */
1663 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
1664 "clang binary to use for compiling BPF scriptlets"),
1665 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
1666 "options passed to clang when compiling BPF scriptlets"),
1667 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
1668 "file", "vmlinux pathname"),
1669 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
1670 "Record build-id of all DSOs regardless of hits"),
1671 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
1672 "append timestamp to output filename"),
1673 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
1674 "Record timestamp boundary (time of first/last samples)"),
/* Output switching; argument parsed later by switch_output_setup(). */
1675 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
1676 &record.switch_output.set, "signal,size,time",
1677 "Switch output when receive SIGUSR2 or cross size,time threshold",
1679 OPT_BOOLEAN(0, "dry-run", &dry_run,
1680 "Parse options then exit"),
/* Exported (non-static) so builtin-script can reuse the same table. */
1684 struct option *record_options = __record_options;
/*
 * Entry point for 'perf record'. Parses options into the global
 * 'record', validates the target/event configuration, applies the
 * build-id cache policy, then hands off to __cmd_record() for the
 * actual recording loop. Returns 0 on success, non-zero on error.
 *
 * NOTE(review): many lines (braces, gotos, some statements) are elided
 * in this extract; comments describe only what is visible.
 */
1686 int cmd_record(int argc, const char **argv)
1689 struct record *rec = &record;
1690 char errbuf[BUFSIZ];
1692 setlocale(LC_ALL, "");
/* Without libbpf, mark the clang-related options as unavailable at runtime. */
1694 #ifndef HAVE_LIBBPF_SUPPORT
1695 # define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
1696 set_nobuild('\0', "clang-path", true);
1697 set_nobuild('\0', "clang-opt", true);
/* Same for --vmlinux when BPF prologue support is missing; pick a reason string. */
1701 #ifndef HAVE_BPF_PROLOGUE
1702 # if !defined (HAVE_DWARF_SUPPORT)
1703 # define REASON "NO_DWARF=1"
1704 # elif !defined (HAVE_LIBBPF_SUPPORT)
1705 # define REASON "NO_LIBBPF=1"
1707 # define REASON "this architecture doesn't support BPF prologue"
1709 # define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
1710 set_nobuild('\0', "vmlinux", true);
/* Allocate the event list, load perf config, then parse the command line. */
1715 rec->evlist = perf_evlist__new();
1716 if (rec->evlist == NULL)
1719 err = perf_config(perf_record_config, rec);
1723 argc = parse_options(argc, argv, record_options, record_usage,
1724 PARSE_OPT_STOP_AT_NON_OPTION);
1726 perf_quiet_option();
1728 /* Make system wide (-a) the default target. */
1729 if (!argc && target__none(&rec->opts.target))
1730 rec->opts.target.system_wide = true;
/* Cross-option validation. */
1732 if (nr_cgroups && !rec->opts.target.system_wide) {
1733 usage_with_options_msg(record_usage, record_options,
1734 "cgroup monitoring only available in system-wide mode");
1737 if (rec->opts.record_switch_events &&
1738 !perf_can_record_switch_events()) {
1739 ui__error("kernel does not support recording context switch events\n");
1740 parse_options_usage(record_usage, record_options, "switch-events", 0);
/* Parse --switch-output; on a time threshold, arm SIGALRM for it. */
1744 if (switch_output_setup(rec)) {
1745 parse_options_usage(record_usage, record_options, "switch-output", 0);
1749 if (rec->switch_output.time) {
1750 signal(SIGALRM, alarm_sig_handler);
1751 alarm(rec->switch_output.time);
1755 * Allow aliases to facilitate the lookup of symbols for address
1756 * filters. Refer to auxtrace_parse_filters().
1758 symbol_conf.allow_aliases = true;
/* AUX-trace and BPF stdout setup; errors are reported with strerror text. */
1762 err = record__auxtrace_init(rec);
1769 err = bpf__setup_stdout(rec->evlist);
1771 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
1772 pr_err("ERROR: Setup BPF stdout failed: %s\n",
/* Warn when kptr_restrict will prevent kernel symbol resolution. */
1779 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
1781 "WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1782 "check /proc/sys/kernel/kptr_restrict.\n\n"
1783 "Samples in kernel functions may not be resolved if a suitable vmlinux\n"
1784 "file is not found in the buildid cache or in the vmlinux path.\n\n"
1785 "Samples in kernel modules won't be resolved at all.\n\n"
1786 "If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
1787 "even with a suitable vmlinux or kallsyms file.\n\n"
/* Build-id cache policy: explicit opt-out wins; --switch-output defaults to off. */
1789 if (rec->no_buildid_cache || rec->no_buildid) {
1790 disable_buildid_cache();
1791 } else if (rec->switch_output.enabled) {
1793 * In 'perf record --switch-output', disable buildid
1794 * generation by default to reduce data file switching
1795 * overhead. Still generate buildid if they are required
1798 * perf record --switch-output --no-no-buildid \
1799 * --no-no-buildid-cache
1801 * Following code equals to:
1803 * if ((rec->no_buildid || !rec->no_buildid_set) &&
1804 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
1805 * disable_buildid_cache();
1807 bool disable = true;
1809 if (rec->no_buildid_set && !rec->no_buildid)
1811 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
1814 rec->no_buildid = true;
1815 rec->no_buildid_cache = true;
1816 disable_buildid_cache();
/* Overwrite mode implies synthesizing non-sample events at the tail. */
1820 if (record.opts.overwrite)
1821 record.opts.tail_synthesize = true;
/* Fall back to the default event when none was given on the command line. */
1823 if (rec->evlist->nr_entries == 0 &&
1824 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
1825 pr_err("Not enough memory for event selector list\n");
/* -t without an explicit inherit choice implies --no-inherit. */
1829 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
1830 rec->opts.no_inherit = true;
/* Validate and resolve the target (pid/tid/cpu/uid combination). */
1832 err = target__validate(&rec->opts.target);
1834 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
1835 ui__warning("%s\n", errbuf);
1838 err = target__parse_uid(&rec->opts.target);
1840 int saved_errno = errno;
1842 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
1843 ui__error("%s", errbuf);
1849 /* Enable ignoring missing threads when -u/-p option is defined. */
1850 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
1853 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
1854 usage_with_options(record_usage, record_options);
1856 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
1861 * We take all buildids when the file contains
1862 * AUX area tracing data because we do not decode the
1863 * trace because it would take too long.
1865 if (rec->opts.full_auxtrace)
1866 rec->buildid_all = true;
1868 if (record_opts__config(&rec->opts)) {
/* Run the recording session, then release the evlist and AUX-trace state. */
1873 err = __cmd_record(&record, argc, argv);
1875 perf_evlist__delete(rec->evlist);
1877 auxtrace_record__free(rec->itr);
/*
 * Signal handler shared by AUX-trace snapshot mode and
 * --switch-output=signal (installed for SIGUSR2 per the --switch-output
 * help text; installation site not visible in this extract). It only
 * flips trigger state and kicks off a snapshot; heavy work happens in
 * the main loop when the triggers are observed.
 */
1881 static void snapshot_sig_handler(int sig __maybe_unused)
1883 struct record *rec = &record;
1885 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
1886 trigger_hit(&auxtrace_snapshot_trigger);
/* Flag consumed by the record loop; set before starting the snapshot. */
1887 auxtrace_record__snapshot_started = 1;
1888 if (auxtrace_record__snapshot_start(record.itr))
1889 trigger_error(&auxtrace_snapshot_trigger);
/* Separately, request an output-file switch if signal mode is enabled. */
1892 if (switch_output_signal(rec))
1893 trigger_hit(&switch_output_trigger);
/*
 * SIGALRM handler armed by cmd_record() when --switch-output uses a
 * time threshold (signal(SIGALRM, ...) + alarm(...)): request an
 * output-file switch via the trigger; the record loop does the switch.
 */
1896 static void alarm_sig_handler(int sig __maybe_unused)
1898 struct record *rec = &record;
1900 if (switch_output_time(rec))
1901 trigger_hit(&switch_output_trigger);