// SPDX-License-Identifier: GPL-2.0-only
#include <errno.h>
#include "util/pmu-hybrid.h"
#include "util/evlist-hybrid.h"
#include <linux/err.h>
#include <linux/string.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
19 int evlist__add_default_hybrid(struct evlist *evlist, bool precise)
24 struct perf_cpu_map *cpus;
26 perf_pmu__for_each_hybrid_pmu(pmu) {
27 config = PERF_COUNT_HW_CPU_CYCLES |
28 ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT);
29 evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
34 cpus = perf_cpu_map__get(pmu->cpus);
35 evsel->core.cpus = cpus;
36 evsel->core.own_cpus = perf_cpu_map__get(cpus);
37 evsel->pmu_name = strdup(pmu->name);
38 evlist__add(evlist, evsel);
44 static bool group_hybrid_conflict(struct evsel *leader)
46 struct evsel *pos, *prev = NULL;
48 for_each_group_evsel(pos, leader) {
49 if (!evsel__is_hybrid(pos))
52 if (prev && strcmp(prev->pmu_name, pos->pmu_name))
61 void evlist__warn_hybrid_group(struct evlist *evlist)
65 evlist__for_each_entry(evlist, evsel) {
66 if (evsel__is_group_leader(evsel) &&
67 evsel->core.nr_members > 1 &&
68 group_hybrid_conflict(evsel)) {
69 pr_warning("WARNING: events in group from "
70 "different hybrid PMUs!\n");
76 bool evlist__has_hybrid(struct evlist *evlist)
80 evlist__for_each_entry(evlist, evsel) {
81 if (evsel->pmu_name &&
82 perf_pmu__is_hybrid(evsel->pmu_name)) {
90 int evlist__fix_hybrid_cpus(struct evlist *evlist, const char *cpu_list)
92 struct perf_cpu_map *cpus;
93 struct evsel *evsel, *tmp;
95 int ret, unmatched_count = 0, events_nr = 0;
97 if (!perf_pmu__has_hybrid() || !cpu_list)
100 cpus = perf_cpu_map__new(cpu_list);
105 * The evsels are created with hybrid pmu's cpus. But now we
106 * need to check and adjust the cpus of evsel by cpu_list because
107 * cpu_list may cause conflicts with cpus of evsel. For example,
108 * cpus of evsel is cpu0-7, but the cpu_list is cpu6-8, we need
109 * to adjust the cpus of evsel to cpu6-7. And then propatate maps
110 * in evlist__create_maps().
112 evlist__for_each_entry_safe(evlist, tmp, evsel) {
113 struct perf_cpu_map *matched_cpus, *unmatched_cpus;
114 char buf1[128], buf2[128];
116 pmu = perf_pmu__find_hybrid_pmu(evsel->pmu_name);
120 ret = perf_pmu__cpus_match(pmu, cpus, &matched_cpus,
127 if (perf_cpu_map__nr(matched_cpus) > 0 &&
128 (perf_cpu_map__nr(unmatched_cpus) > 0 ||
129 perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(cpus) ||
130 perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(pmu->cpus))) {
131 perf_cpu_map__put(evsel->core.cpus);
132 perf_cpu_map__put(evsel->core.own_cpus);
133 evsel->core.cpus = perf_cpu_map__get(matched_cpus);
134 evsel->core.own_cpus = perf_cpu_map__get(matched_cpus);
136 if (perf_cpu_map__nr(unmatched_cpus) > 0) {
137 cpu_map__snprint(matched_cpus, buf1, sizeof(buf1));
138 pr_warning("WARNING: use %s in '%s' for '%s', skip other cpus in list.\n",
139 buf1, pmu->name, evsel->name);
143 if (perf_cpu_map__nr(matched_cpus) == 0) {
144 evlist__remove(evlist, evsel);
145 evsel__delete(evsel);
147 cpu_map__snprint(cpus, buf1, sizeof(buf1));
148 cpu_map__snprint(pmu->cpus, buf2, sizeof(buf2));
149 pr_warning("WARNING: %s isn't a '%s', please use a CPU list in the '%s' range (%s)\n",
150 buf1, pmu->name, pmu->name, buf2);
154 perf_cpu_map__put(matched_cpus);
155 perf_cpu_map__put(unmatched_cpus);
158 ret = (unmatched_count == events_nr) ? -1 : 0;
160 perf_cpu_map__put(cpus);