1 // SPDX-License-Identifier: GPL-2.0
5 #include "parse-events.h"
11 typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);
/*
 * Probe whether the running kernel accepts the event described by @str:
 * parse it into a throw-away evlist, then try a real perf_event_open() on
 * @cpu.  @fn is applied to the evsel so callers can probe a specific attr
 * bit (sample_identifier, comm_exec, context_switch, ...).
 * Returns 0 on success, a negative errno-style value otherwise (-EAGAIN
 * when the event itself could not be parsed/opened, so the caller may try
 * an alternative event string).
 * NOTE(review): several lines (braces, fn() invocation, goto cleanup
 * labels) are missing from this view; comments describe only visible code.
 */
13 static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
15 	struct perf_evlist *evlist;
16 	struct perf_evsel *evsel;
	/* O_CLOEXEC variant of the open flags, when the kernel supports it */
17 	unsignedned long flags = perf_event_open_cloexec_flag();
18 	int err = -EAGAIN, fd;
	/* static: remember across calls whether pid -1 (all procs) worked */
19 	static pid_t pid = -1;
21 	evlist = perf_evlist__new();
25 	if (parse_events(evlist, str, NULL))
28 	evsel = perf_evlist__first(evlist);
	/* first attempt: open on the probed pid/cpu */
31 	fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
	/*
	 * EACCES with pid == -1 suggests perf_event_paranoid refused a
	 * system-wide open; retry (presumably with a restricted scope —
	 * the adjustment lines are not visible here, confirm upstream).
	 */
33 	if (pid == -1 && errno == EACCES) {
45 		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
55 	perf_evlist__delete(evlist);
/*
 * Returns true if the kernel accepts an event with the attr bit set by @fn.
 * Tries a list of cheap user-space events in turn ("cycles:u", then
 * "instructions:u", then "cpu-clock:u") on the first online CPU, advancing
 * to the next candidate only while perf_do_probe_api() reports -EAGAIN.
 * NOTE(review): loop setup/cleanup lines are missing from this view.
 */
59 static bool perf_probe_api(setup_probe_fn_t fn)
61 	const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
65 	cpus = cpu_map__new(NULL);
72 		ret = perf_do_probe_api(fn, cpu, try[i++]);
	/* keep trying alternatives only on -EAGAIN and while candidates remain */
75 	} while (ret == -EAGAIN && try[i]);
80 static void perf_probe_sample_identifier(struct perf_evsel *evsel)
82 evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
85 static void perf_probe_comm_exec(struct perf_evsel *evsel)
87 evsel->attr.comm_exec = 1;
90 static void perf_probe_context_switch(struct perf_evsel *evsel)
92 evsel->attr.context_switch = 1;
95 bool perf_can_sample_identifier(void)
97 return perf_probe_api(perf_probe_sample_identifier);
100 static bool perf_can_comm_exec(void)
102 return perf_probe_api(perf_probe_comm_exec);
105 bool perf_can_record_switch_events(void)
107 return perf_probe_api(perf_probe_context_switch);
/*
 * Returns true if a system-wide (pid == -1, per-cpu) event can be opened:
 * tries a real perf_event_open() of a software cpu-clock event on one CPU.
 * This typically fails for unprivileged users under a restrictive
 * perf_event_paranoid setting.
 * NOTE(review): attr initialization tail, cpu selection, and fd/map cleanup
 * lines are missing from this view.
 */
110 bool perf_can_record_cpu_wide(void)
112 	struct perf_event_attr attr = {
113 		.type = PERF_TYPE_SOFTWARE,
114 		.config = PERF_COUNT_SW_CPU_CLOCK,
117 	struct cpu_map *cpus;
120 	cpus = cpu_map__new(NULL);
	/* pid == -1: all processes on this cpu (requires privilege) */
126 	fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
/*
 * Configure every evsel in @evlist for recording according to @opts and
 * @callchain: set group leaders first (some attr setup depends on them),
 * apply per-evsel config, propagate comm_exec to tracking events when the
 * kernel supports it, and decide whether PERF_SAMPLE_IDENTIFIER is needed
 * (always for auxtrace; otherwise only when evsels' sample_types diverge).
 * Finally records the resulting sample-id position on the evlist.
 * NOTE(review): several closing braces / intermediate lines are missing
 * from this view; comments describe only visible code.
 */
134 void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
135 			 struct callchain_param *callchain)
137 	struct perf_evsel *evsel;
138 	bool use_sample_identifier = false;
142 	 * Set the evsel leader links before we configure attributes,
143 	 * since some might depend on this info.
146 	perf_evlist__set_leader(evlist);
	/* a negative first cpu map entry means per-task monitoring; presumably
	 * inherit is unusable there — confirm against upstream record.c */
148 	if (evlist->cpus->map[0] < 0)
149 		opts->no_inherit = true;
151 	use_comm_exec = perf_can_comm_exec();
153 	evlist__for_each_entry(evlist, evsel) {
154 		perf_evsel__config(evsel, opts, callchain);
		/* tracking evsels carry side-band records; mark execs if possible */
155 		if (evsel->tracking && use_comm_exec)
156 			evsel->attr.comm_exec = 1;
159 	if (opts->full_auxtrace) {
161 		 * Need to be able to synthesize and parse selected events with
162 		 * arbitrary sample types, which requires always being able to
165 		use_sample_identifier = perf_can_sample_identifier();
166 		evlist__for_each_entry(evlist, evsel)
167 			perf_evsel__set_sample_id(evsel, use_sample_identifier);
168 	} else if (evlist->nr_entries > 1) {
169 		struct perf_evsel *first = perf_evlist__first(evlist);
		/* only differing sample_types force use of the identifier */
171 		evlist__for_each_entry(evlist, evsel) {
172 			if (evsel->attr.sample_type == first->attr.sample_type)
174 			use_sample_identifier = perf_can_sample_identifier();
177 		evlist__for_each_entry(evlist, evsel)
178 			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	/* remember where the sample id lives within a record */
181 	perf_evlist__set_id_pos(evlist);
/*
 * Read the kernel's maximum sampling frequency
 * (/proc/sys/kernel/perf_event_max_sample_rate) into *rate.
 * Returns sysctl__read_int()'s result: 0 on success, negative on error.
 */
static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}
/*
 * Resolve the sampling period/frequency in @opts: user-supplied interval or
 * frequency overrides the defaults, and any frequency is clamped against
 * the kernel's perf_event_max_sample_rate (hard error for an explicit user
 * request over the limit, warning + clamp for the default).
 * Returns 0 on success, negative on error.
 * NOTE(review): early returns / closing braces are missing from this view;
 * comments describe only visible code.
 */
189 static int record_opts__config_freq(struct record_opts *opts)
191 	bool user_freq = opts->user_freq != UINT_MAX;
192 	unsigned int max_rate;
	/* an explicit -c count wins over any frequency setting */
194 	if (opts->user_interval != ULLONG_MAX)
195 		opts->default_interval = opts->user_interval;
197 		opts->freq = opts->user_freq;
200 	 * User specified count overrides default frequency.
202 	if (opts->default_interval)
204 	else if (opts->freq) {
205 		opts->default_interval = opts->freq;
	/* neither a count nor a frequency: nothing to sample with */
207 		pr_err("frequency and count are zero, aborting\n");
211 	if (get_max_rate(&max_rate))
215 	 * User specified frequency is over current maximum.
217 	if (user_freq && (max_rate < opts->freq)) {
218 		pr_err("Maximum frequency rate (%u) reached.\n"
219 		       "Please use -F freq option with lower value or consider\n"
220 		       "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
226 	 * Default frequency is over current maximum.
228 	if (max_rate < opts->freq) {
229 		pr_warning("Lowering default frequency rate to %u.\n"
230 			   "Please consider tweaking "
231 			   "/proc/sys/kernel/perf_event_max_sample_rate.\n",
233 		opts->freq = max_rate;
/*
 * Validate and finalize record options.  Currently only the sampling
 * frequency/period needs resolving; returns that helper's result
 * (0 on success, negative on error).
 */
int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}
/*
 * Returns true if the event described by @str can actually be opened in
 * the context of @evlist: parses it into a temporary evlist, picks a CPU
 * (first of @evlist's cpu map, or of the default map when @evlist has no
 * cpus), then tries a real perf_event_open().
 * NOTE(review): error/goto cleanup paths and the fd close are missing from
 * this view; comments describe only visible code.
 */
244 bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
246 	struct perf_evlist *temp_evlist;
247 	struct perf_evsel *evsel;
252 	temp_evlist = perf_evlist__new();
256 	err = parse_events(temp_evlist, str, NULL);
	/* probe with the last-parsed evsel, i.e. the one @str described */
260 	evsel = perf_evlist__last(temp_evlist);
262 	if (!evlist || cpu_map__empty(evlist->cpus)) {
263 		struct cpu_map *cpus = cpu_map__new(NULL);
		/* fall back to CPU 0 if even the default map is unavailable */
265 		cpu = cpus ? cpus->map[0] : 0;
268 		cpu = evlist->cpus->map[0];
272 	fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1,
273 				 perf_event_open_cloexec_flag());
	/* EACCES for pid == -1 suggests a paranoia refusal; a retry follows
	 * (adjustment lines not visible here — confirm upstream) */
275 	if (pid == -1 && errno == EACCES) {
287 	perf_evlist__delete(temp_evlist);