1 // SPDX-License-Identifier: GPL-2.0
4 #include <linux/zalloc.h>
13 static void __maps__insert(struct maps *maps, struct map *map);
/*
 * Initialize an already-allocated maps container: empty rbtree of maps,
 * reader/writer lock, back-pointer to the owning machine, no by-name
 * cache yet, and a refcount of one for the caller.
 * NOTE(review): listing is elided; braces/other init lines not shown.
 */
15 static void maps__init(struct maps *maps, struct machine *machine)
17 maps->entries = RB_ROOT;
18 init_rwsem(&maps->lock);
19 maps->machine = machine;
/* Invalidate the memoized result of the last by-name search */
20 maps->last_search_by_name = NULL;
/* Lazily built, name-sorted array cache; created on first name search */
22 maps->maps_by_name = NULL;
23 refcount_set(&maps->refcnt, 1);
/*
 * Drop the name-sorted cache array (not the maps themselves); the next
 * by-name search will rebuild it from the rbtree.
 * Caller must hold maps->lock for writing.
 */
26 static void __maps__free_maps_by_name(struct maps *maps)
29 	 * Free everything to try to do it from the rbtree in the next search
31 zfree(&maps->maps_by_name);
32 maps->nr_maps_allocated = 0;
/*
 * Insert @map into @maps under the write lock, and keep the optional
 * name-sorted cache (maps_by_name) consistent with the rbtree.
 * NOTE(review): listing is elided; nr_maps is presumably incremented in
 * one of the hidden lines (it is read below) — confirm against full file.
 */
35 void maps__insert(struct maps *maps, struct map *map)
37 down_write(&maps->lock);
38 __maps__insert(maps, map);
/* Kernel maps carry extra kmap bookkeeping; sanity-check the pairing */
41 if (map->dso && map->dso->kernel) {
42 	struct kmap *kmap = map__kmap(map);
47 	pr_err("Internal error: kernel dso with non kernel map\n");
52 	 * If we already performed some search by name, then we need to add the just
53 	 * inserted map and resort.
55 if (maps->maps_by_name) {
/* Grow the cache array when the map count outruns its allocation */
56 	if (maps->nr_maps > maps->nr_maps_allocated) {
/* Geometric growth to amortize the realloc cost */
57 		int nr_allocate = maps->nr_maps * 2;
/* sizeof(map) == sizeof(struct map *): array of pointers, correct here */
58 		struct map **maps_by_name = realloc(maps->maps_by_name, nr_allocate * sizeof(map));
60 		if (maps_by_name == NULL) {
/* On OOM, drop the cache entirely; searches fall back to the rbtree */
61 			__maps__free_maps_by_name(maps);
62 			up_write(&maps->lock);
66 		maps->maps_by_name = maps_by_name;
67 		maps->nr_maps_allocated = nr_allocate;
/* Append the new map and restore sorted-by-name order */
69 	maps->maps_by_name[maps->nr_maps - 1] = map;
70 	__maps__sort_by_name(maps);
72 up_write(&maps->lock);
/*
 * Unlink @map from the rbtree. Caller holds maps->lock for writing.
 * NOTE(review): elided lines likely also drop a reference — confirm.
 */
75 static void __maps__remove(struct maps *maps, struct map *map)
77 rb_erase_init(&map->rb_node, &maps->entries);
/*
 * Remove @map from @maps under the write lock, invalidating both the
 * memoized last-by-name hit and the whole by-name cache (cheaper to
 * rebuild than to splice one entry out of the sorted array).
 */
83 void maps__remove(struct maps *maps, struct map *map)
83 down_write(&maps->lock);
84 if (maps->last_search_by_name == map)
85 	maps->last_search_by_name = NULL;
87 __maps__remove(maps, map);
89 if (maps->maps_by_name)
90 	__maps__free_maps_by_name(maps);
91 up_write(&maps->lock);
/*
 * Empty the rbtree, erasing every map. Uses the _safe iterator because
 * entries are unlinked while walking. Caller holds the write lock.
 * NOTE(review): elided lines presumably put each map's refcount.
 */
94 static void __maps__purge(struct maps *maps)
96 struct map *pos, *next;
98 maps__for_each_entry_safe(maps, pos, next) {
99 	rb_erase_init(&pos->rb_node, &maps->entries);
/*
 * Tear down the contents of @maps under the write lock (purge is in the
 * elided body between the lock/unlock pair).
 */
104 static void maps__exit(struct maps *maps)
106 down_write(&maps->lock);
108 up_write(&maps->lock);
/* True when the rbtree holds no maps (no first entry). */
111 bool maps__empty(struct maps *maps)
113 return !maps__first(maps);
/*
 * Allocate and initialize a maps container for @machine.
 * Returns the new maps, or NULL on allocation failure (the elided
 * lines presumably guard the zalloc result before maps__init).
 */
116 struct maps *maps__new(struct machine *machine)
118 struct maps *maps = zalloc(sizeof(*maps));
121 maps__init(maps, machine);
/*
 * Final destruction path (called when the last reference is dropped):
 * releases unwind state; exit/free happen in the elided lines.
 */
126 void maps__delete(struct maps *maps)
129 unwind__finish_access(maps);
/*
 * Drop one reference; NULL-safe. The elided body calls the destructor
 * when the count hits zero.
 */
133 void maps__put(struct maps *maps)
135 if (maps && refcount_dec_and_test(&maps->refcnt))
/*
 * Resolve @addr to a symbol: find the containing map, make sure its DSO
 * symbols are loaded, then look the symbol up by map-relative address.
 * @mapp, when non-NULL, presumably receives the map (set in elided code).
 */
139 struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
141 struct map *map = maps__find(maps, addr);
143 /* Ensure map is loaded before using map->map_ip */
144 if (map != NULL && map__load(map) >= 0) {
/* map_ip translates the absolute address into the map's own space */
147 	return map__find_symbol(map, map->map_ip(map, addr));
/*
 * Linear scan of all maps looking up @name in each map's DSO, under the
 * read lock. The containment check guards against a DSO hit whose
 * address range falls outside this particular mapping.
 */
153 struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
158 down_read(&maps->lock);
160 maps__for_each_entry(maps, pos) {
161 	sym = map__find_symbol_by_name(pos, name);
165 	if (!map__contains_symbol(pos, sym)) {
176 up_read(&maps->lock);
/*
 * Fill in the addr_map_symbol for ams->addr: re-resolve the map if the
 * cached one no longer covers the address, then compute the map-local
 * address and its symbol. Returns 0 on success, -1 when no symbol found.
 */
180 int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
/* Cached map stale (addr outside [start, end)): look it up again */
182 if (ams->addr < ams->ms.map->start || ams->addr >= ams->ms.map->end) {
185 	ams->ms.map = maps__find(maps, ams->addr);
186 	if (ams->ms.map == NULL)
190 ams->al_addr = ams->ms.map->map_ip(ams->ms.map, ams->addr);
191 ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);
193 return ams->ms.sym ? 0 : -1;
/*
 * Debug dump: print every map (and, per the elided condition, its DSO)
 * to @fp under the read lock. Returns total characters printed.
 */
196 size_t maps__fprintf(struct maps *maps, FILE *fp)
201 down_read(&maps->lock);
203 maps__for_each_entry(maps, pos) {
204 	printed += fprintf(fp, "Map:");
205 	printed += map__fprintf(pos, fp);
207 	printed += dso__fprintf(pos->dso, fp);
208 	printed += fprintf(fp, "--\n");
212 up_read(&maps->lock);
/*
 * Make room for @map in @maps: every existing map overlapping
 * [map->start, map->end) is removed, and the non-overlapped head/tail
 * pieces of each victim are re-inserted as cloned "before"/"after" maps.
 * Diagnostics go to @fp. Returns 0 on success (error paths elided).
 */
217 int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
219 struct rb_root *root;
220 struct rb_node *next, *first;
223 down_write(&maps->lock);
225 root = &maps->entries;
228 	 * Find first map where end > map->start.
229 	 * Same as find_vma() in kernel.
231 next = root->rb_node;
234 	struct map *pos = rb_entry(next, struct map, rb_node);
236 	if (pos->end > map->start) {
/* Candidate found; keep descending left for an earlier one */
238 		if (pos->start <= map->start)
240 		next = next->rb_left;
242 		next = next->rb_right;
/* Walk forward from the first overlap candidate */
247 	struct map *pos = rb_entry(next, struct map, rb_node);
/* Grab the successor now: pos is about to be erased from the tree */
248 	next = rb_next(&pos->rb_node);
251 	 * Stop if current map starts after map->end.
252 	 * Maps are ordered by start: next will not overlap for sure.
254 	if (pos->start >= map->end)
260 	pr_debug("overlapping maps in %s (disable tui for more info)\n",
263 	fputs("overlapping maps:\n", fp);
264 	map__fprintf(map, fp);
265 	map__fprintf(pos, fp);
269 	rb_erase_init(&pos->rb_node, root);
271 	 * Now check if we need to create new maps for areas not
272 	 * overlapped by the new map:
/* Head piece: the part of pos that lies before map->start survives */
274 	if (map->start > pos->start) {
275 		struct map *before = map__clone(pos);
277 		if (before == NULL) {
/* Truncate the clone so it ends where the new map begins */
282 		before->end = map->start;
283 		__maps__insert(maps, before);
284 		if (verbose >= 2 && !use_browser)
285 			map__fprintf(before, fp);
/* Tail piece: the part of pos after map->end survives */
289 	if (map->end < pos->end) {
290 		struct map *after = map__clone(pos);
297 		after->start = map->end;
/* Shift file offset so after->pgoff still matches after->start */
298 		after->pgoff += map->end - pos->start;
/* Both old and new mapping must translate map->end identically */
299 		assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
300 		__maps__insert(maps, after);
301 		if (verbose >= 2 && !use_browser)
302 			map__fprintf(after, fp);
314 up_write(&maps->lock);
319 	 * XXX This should not really _copy_ the maps, but refcount them.
/*
 * Copy every map from @parent into @thread's maps (deep clone per map,
 * plus unwind state preparation), holding the parent's read lock.
 * Returns 0 on success; error handling lives in the elided lines.
 */
321 int maps__clone(struct thread *thread, struct maps *parent)
323 struct maps *maps = thread->maps;
327 down_read(&parent->lock);
329 maps__for_each_entry(parent, map) {
330 	struct map *new = map__clone(map);
/* Set up DWARF unwind access for the clone before publishing it */
337 	err = unwind__prepare_access(maps, new, NULL);
341 	maps__insert(maps, new);
347 up_read(&parent->lock);
/*
 * rbtree insertion keyed on map->start (descent comparisons are in the
 * elided lines). Caller holds maps->lock for writing.
 */
351 static void __maps__insert(struct maps *maps, struct map *map)
353 struct rb_node **p = &maps->entries.rb_node;
354 struct rb_node *parent = NULL;
355 const u64 ip = map->start;
360 	m = rb_entry(parent, struct map, rb_node);
/* Standard two-step rbtree insert: link at the found leaf, then rebalance */
367 rb_link_node(&map->rb_node, parent, p);
368 rb_insert_color(&map->rb_node, &maps->entries);
/*
 * Binary search the rbtree for the map whose [start, end) contains @ip,
 * under the read lock. Returns NULL when no map covers the address
 * (return paths are in the elided lines).
 */
372 struct map *maps__find(struct maps *maps, u64 ip)
377 down_read(&maps->lock);
379 p = maps->entries.rb_node;
381 	m = rb_entry(p, struct map, rb_node);
/* ip at or past end: containing map, if any, is to the right */
384 	else if (ip >= m->end)
392 up_read(&maps->lock);
/*
 * Lowest-start map in the tree, or (per the elided NULL check) NULL when
 * the tree is empty. No locking here; callers synchronize as needed.
 */
396 struct map *maps__first(struct maps *maps)
398 struct rb_node *first = rb_first(&maps->entries);
401 	return rb_entry(first, struct map, rb_node);