1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <linux/err.h>
5 #include <linux/kernel.h>
6 #include <linux/zalloc.h>
7 #include <api/fs/fs.h>
8
9 #include <byteswap.h>
10 #include <unistd.h>
11 #include <sys/types.h>
12 #include <sys/mman.h>
13 #include <perf/cpumap.h>
14
15 #include "map_symbol.h"
16 #include "branch.h"
17 #include "debug.h"
18 #include "evlist.h"
19 #include "evsel.h"
20 #include "memswap.h"
21 #include "map.h"
22 #include "symbol.h"
23 #include "session.h"
24 #include "tool.h"
25 #include "perf_regs.h"
26 #include "asm/bug.h"
27 #include "auxtrace.h"
28 #include "thread.h"
29 #include "thread-stack.h"
30 #include "sample-raw.h"
31 #include "stat.h"
32 #include "ui/progress.h"
33 #include "../perf.h"
34 #include "arch/common.h"
35 #include <internal/lib.h>
36
37 #ifdef HAVE_ZSTD_SUPPORT
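/*
 * Decompress a PERF_RECORD_COMPRESSED event into a freshly mmap'ed buffer
 * and append it to the session's decomp list.  Any bytes left unconsumed
 * from the previous chunk (decomp_last) are copied in front of the newly
 * decompressed data so records split across chunks stay contiguous.
 */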
38 static int perf_session__process_compressed_event(struct perf_session *session,
39                                                   union perf_event *event, u64 file_offset)
40 {
41         void *src;
42         size_t decomp_size, src_size;
43         u64 decomp_last_rem = 0;
44         size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
45         struct decomp *decomp, *decomp_last = session->decomp_last;
46
47         if (decomp_last) {
48                 decomp_last_rem = decomp_last->size - decomp_last->head;
49                 decomp_len += decomp_last_rem;
50         }
51
52         mmap_len = sizeof(struct decomp) + decomp_len;
53         decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
54                       MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
55         if (decomp == MAP_FAILED) {
56                 pr_err("Couldn't allocate memory for decompression\n");
57                 return -1;
58         }
59
60         decomp->file_pos = file_offset;
61         decomp->mmap_len = mmap_len;
62         decomp->head = 0;
63
64         if (decomp_last_rem) {
65                 memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
66                 decomp->size = decomp_last_rem;
67         }
68
69         src = (void *)event + sizeof(struct perf_record_compressed);
70         src_size = event->pack.header.size - sizeof(struct perf_record_compressed);
71
72         decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
73                                 &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
74         if (!decomp_size) {
75                 munmap(decomp, mmap_len);
76                 pr_err("Couldn't decompress data\n");
77                 return -1;
78         }
79
80         decomp->size += decomp_size;
81
82         if (session->decomp == NULL) {
83                 session->decomp = decomp;
84                 session->decomp_last = decomp;
85         } else {
86                 session->decomp_last->next = decomp;
87                 session->decomp_last = decomp;
88         }
89
90         pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);
91
92         return 0;
93 }
94 #else /* !HAVE_ZSTD_SUPPORT */
95 #define perf_session__process_compressed_event perf_session__process_compressed_event_stub
96 #endif
97
98 static int perf_session__deliver_event(struct perf_session *session,
99                                        union perf_event *event,
100                                        struct perf_tool *tool,
101                                        u64 file_offset);
102
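/*
 * Read and validate the perf.data header; for on-disk (non-pipe, non-stat)
 * data also check that all evsels agree on sample_type, sample_id_all and
 * read_format.
 */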
103 static int perf_session__open(struct perf_session *session)
104 {
105         struct perf_data *data = session->data;
106
107         if (perf_session__read_header(session) < 0) {
108                 pr_err("incompatible file format (rerun with -v to learn more)\n");
109                 return -1;
110         }
111
112         if (perf_data__is_pipe(data))
113                 return 0;
114
115         if (perf_header__has_feat(&session->header, HEADER_STAT))
116                 return 0;
117
118         if (!evlist__valid_sample_type(session->evlist)) {
119                 pr_err("non matching sample_type\n");
120                 return -1;
121         }
122
123         if (!evlist__valid_sample_id_all(session->evlist)) {
124                 pr_err("non matching sample_id_all\n");
125                 return -1;
126         }
127
128         if (!perf_evlist__valid_read_format(session->evlist)) {
129                 pr_err("non matching read_format\n");
130                 return -1;
131         }
132
133         return 0;
134 }
135
136 void perf_session__set_id_hdr_size(struct perf_session *session)
137 {
138         u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
139
140         machines__set_id_hdr_size(&session->machines, id_hdr_size);
141 }
142
143 int perf_session__create_kernel_maps(struct perf_session *session)
144 {
145         int ret = machine__create_kernel_maps(&session->machines.host);
146
147         if (ret >= 0)
148                 ret = machines__create_guest_kernel_maps(&session->machines);
149         return ret;
150 }
151
152 static void perf_session__destroy_kernel_maps(struct perf_session *session)
153 {
154         machines__destroy_kernel_maps(&session->machines);
155 }
156
157 static bool perf_session__has_comm_exec(struct perf_session *session)
158 {
159         struct evsel *evsel;
160
161         evlist__for_each_entry(session->evlist, evsel) {
162                 if (evsel->core.attr.comm_exec)
163                         return true;
164         }
165
166         return false;
167 }
168
169 static void perf_session__set_comm_exec(struct perf_session *session)
170 {
171         bool comm_exec = perf_session__has_comm_exec(session);
172
173         machines__set_comm_exec(&session->machines, comm_exec);
174 }
175
176 static int ordered_events__deliver_event(struct ordered_events *oe,
177                                          struct ordered_event *event)
178 {
179         struct perf_session *session = container_of(oe, struct perf_session,
180                                                     ordered_events);
181
182         return perf_session__deliver_event(session, event->event,
183                                            session->tool, event->file_offset);
184 }
185
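/*
 * A minimal usage sketch (assuming a perf_data and a perf_tool already set
 * up, as the perf builtins do): note the ERR_PTR() return convention, so
 * callers check with IS_ERR()/PTR_ERR() rather than against NULL:
 *
 *	session = perf_session__new(&data, false, &tool);
 *	if (IS_ERR(session))
 *		return PTR_ERR(session);
 *	...
 *	perf_session__delete(session);
 */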
186 struct perf_session *perf_session__new(struct perf_data *data,
187                                        bool repipe, struct perf_tool *tool)
188 {
189         int ret = -ENOMEM;
190         struct perf_session *session = zalloc(sizeof(*session));
191
192         if (!session)
193                 goto out;
194
195         session->repipe = repipe;
196         session->tool   = tool;
197         INIT_LIST_HEAD(&session->auxtrace_index);
198         machines__init(&session->machines);
199         ordered_events__init(&session->ordered_events,
200                              ordered_events__deliver_event, NULL);
201
202         perf_env__init(&session->header.env);
203         if (data) {
204                 ret = perf_data__open(data);
205                 if (ret < 0)
206                         goto out_delete;
207
208                 session->data = data;
209
210                 if (perf_data__is_read(data)) {
211                         ret = perf_session__open(session);
212                         if (ret < 0)
213                                 goto out_delete;
214
215                         /*
216                          * set session attributes that are present in perf.data
217                          * but not in pipe-mode.
218                          */
219                         if (!data->is_pipe) {
220                                 perf_session__set_id_hdr_size(session);
221                                 perf_session__set_comm_exec(session);
222                         }
223
224                         perf_evlist__init_trace_event_sample_raw(session->evlist);
225
226                         /* Open the directory data. */
227                         if (data->is_dir) {
228                                 ret = perf_data__open_dir(data);
229                                 if (ret)
230                                         goto out_delete;
231                         }
232
233                         if (!symbol_conf.kallsyms_name &&
234                             !symbol_conf.vmlinux_name)
235                                 symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
236                 }
237         } else  {
238                 session->machines.host.env = &perf_env;
239         }
240
241         session->machines.host.single_address_space =
242                 perf_env__single_address_space(session->machines.host.env);
243
244         if (!data || perf_data__is_write(data)) {
245                 /*
246                  * In O_RDONLY mode this will be performed when reading the
247                  * kernel MMAP event, in perf_event__process_mmap().
248                  */
249                 if (perf_session__create_kernel_maps(session) < 0)
250                         pr_warning("Cannot read kernel map\n");
251         }
252
253         /*
254          * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
255          * processed, so evlist__sample_id_all is not meaningful here.
256          */
257         if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
258             tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
259                 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
260                 tool->ordered_events = false;
261         }
262
263         return session;
264
265  out_delete:
266         perf_session__delete(session);
267  out:
268         return ERR_PTR(ret);
269 }
270
271 static void perf_session__delete_threads(struct perf_session *session)
272 {
273         machine__delete_threads(&session->machines.host);
274 }
275
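/* Unmap every decompressed-events buffer chained on the session. */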
276 static void perf_session__release_decomp_events(struct perf_session *session)
277 {
278         struct decomp *next, *decomp;
279         size_t mmap_len;
280         next = session->decomp;
281         do {
282                 decomp = next;
283                 if (decomp == NULL)
284                         break;
285                 next = decomp->next;
286                 mmap_len = decomp->mmap_len;
287                 munmap(decomp, mmap_len);
288         } while (1);
289 }
290
291 void perf_session__delete(struct perf_session *session)
292 {
293         if (session == NULL)
294                 return;
295         auxtrace__free(session);
296         auxtrace_index__free(&session->auxtrace_index);
297         perf_session__destroy_kernel_maps(session);
298         perf_session__delete_threads(session);
299         perf_session__release_decomp_events(session);
300         perf_env__exit(&session->header.env);
301         machines__exit(&session->machines);
302         if (session->data)
303                 perf_data__close(session->data);
304         free(session);
305 }
306
307 static int process_event_synth_tracing_data_stub(struct perf_session *session
308                                                  __maybe_unused,
309                                                  union perf_event *event
310                                                  __maybe_unused)
311 {
312         dump_printf(": unhandled!\n");
313         return 0;
314 }
315
316 static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
317                                          union perf_event *event __maybe_unused,
318                                          struct evlist **pevlist
319                                          __maybe_unused)
320 {
321         dump_printf(": unhandled!\n");
322         return 0;
323 }
324
325 static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
326                                                  union perf_event *event __maybe_unused,
327                                                  struct evlist **pevlist
328                                                  __maybe_unused)
329 {
330         if (dump_trace)
331                 perf_event__fprintf_event_update(event, stdout);
332
333         dump_printf(": unhandled!\n");
334         return 0;
335 }
336
337 static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
338                                      union perf_event *event __maybe_unused,
339                                      struct perf_sample *sample __maybe_unused,
340                                      struct evsel *evsel __maybe_unused,
341                                      struct machine *machine __maybe_unused)
342 {
343         dump_printf(": unhandled!\n");
344         return 0;
345 }
346
347 static int process_event_stub(struct perf_tool *tool __maybe_unused,
348                               union perf_event *event __maybe_unused,
349                               struct perf_sample *sample __maybe_unused,
350                               struct machine *machine __maybe_unused)
351 {
352         dump_printf(": unhandled!\n");
353         return 0;
354 }
355
356 static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
357                                        union perf_event *event __maybe_unused,
358                                        struct ordered_events *oe __maybe_unused)
359 {
360         dump_printf(": unhandled!\n");
361         return 0;
362 }
363
364 static int process_finished_round(struct perf_tool *tool,
365                                   union perf_event *event,
366                                   struct ordered_events *oe);
367
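/* Read and discard n bytes from fd; used to skip auxtrace data on a pipe. */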
368 static int skipn(int fd, off_t n)
369 {
370         char buf[4096];
371         ssize_t ret;
372
373         while (n > 0) {
374                 ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
375                 if (ret <= 0)
376                         return ret;
377                 n -= ret;
378         }
379
380         return 0;
381 }
382
383 static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
384                                        union perf_event *event)
385 {
386         dump_printf(": unhandled!\n");
387         if (perf_data__is_pipe(session->data))
388                 skipn(perf_data__fd(session->data), event->auxtrace.size);
389         return event->auxtrace.size;
390 }
391
392 static int process_event_op2_stub(struct perf_session *session __maybe_unused,
393                                   union perf_event *event __maybe_unused)
394 {
395         dump_printf(": unhandled!\n");
396         return 0;
397 }
398
399
400 static
401 int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
402                                   union perf_event *event __maybe_unused)
403 {
404         if (dump_trace)
405                 perf_event__fprintf_thread_map(event, stdout);
406
407         dump_printf(": unhandled!\n");
408         return 0;
409 }
410
411 static
412 int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
413                                union perf_event *event __maybe_unused)
414 {
415         if (dump_trace)
416                 perf_event__fprintf_cpu_map(event, stdout);
417
418         dump_printf(": unhandled!\n");
419         return 0;
420 }
421
422 static
423 int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
424                                    union perf_event *event __maybe_unused)
425 {
426         if (dump_trace)
427                 perf_event__fprintf_stat_config(event, stdout);
428
429         dump_printf(": unhandled!\n");
430         return 0;
431 }
432
433 static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
434                              union perf_event *event)
435 {
436         if (dump_trace)
437                 perf_event__fprintf_stat(event, stdout);
438
439         dump_printf(": unhandled!\n");
440         return 0;
441 }
442
443 static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
444                                    union perf_event *event)
445 {
446         if (dump_trace)
447                 perf_event__fprintf_stat_round(event, stdout);
448
449         dump_printf(": unhandled!\n");
450         return 0;
451 }
452
453 static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
454                                                        union perf_event *event __maybe_unused,
455                                                        u64 file_offset __maybe_unused)
456 {
457         dump_printf(": unhandled!\n");
458         return 0;
459 }
460
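/*
 * Fill in a default handler (a real one where it exists, otherwise a stub)
 * for every callback the tool left NULL, so event dispatch never has to
 * check for missing handlers.
 */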
461 void perf_tool__fill_defaults(struct perf_tool *tool)
462 {
463         if (tool->sample == NULL)
464                 tool->sample = process_event_sample_stub;
465         if (tool->mmap == NULL)
466                 tool->mmap = process_event_stub;
467         if (tool->mmap2 == NULL)
468                 tool->mmap2 = process_event_stub;
469         if (tool->comm == NULL)
470                 tool->comm = process_event_stub;
471         if (tool->namespaces == NULL)
472                 tool->namespaces = process_event_stub;
473         if (tool->cgroup == NULL)
474                 tool->cgroup = process_event_stub;
475         if (tool->fork == NULL)
476                 tool->fork = process_event_stub;
477         if (tool->exit == NULL)
478                 tool->exit = process_event_stub;
479         if (tool->lost == NULL)
480                 tool->lost = perf_event__process_lost;
481         if (tool->lost_samples == NULL)
482                 tool->lost_samples = perf_event__process_lost_samples;
483         if (tool->aux == NULL)
484                 tool->aux = perf_event__process_aux;
485         if (tool->itrace_start == NULL)
486                 tool->itrace_start = perf_event__process_itrace_start;
487         if (tool->context_switch == NULL)
488                 tool->context_switch = perf_event__process_switch;
489         if (tool->ksymbol == NULL)
490                 tool->ksymbol = perf_event__process_ksymbol;
491         if (tool->bpf == NULL)
492                 tool->bpf = perf_event__process_bpf;
493         if (tool->text_poke == NULL)
494                 tool->text_poke = perf_event__process_text_poke;
495         if (tool->read == NULL)
496                 tool->read = process_event_sample_stub;
497         if (tool->throttle == NULL)
498                 tool->throttle = process_event_stub;
499         if (tool->unthrottle == NULL)
500                 tool->unthrottle = process_event_stub;
501         if (tool->attr == NULL)
502                 tool->attr = process_event_synth_attr_stub;
503         if (tool->event_update == NULL)
504                 tool->event_update = process_event_synth_event_update_stub;
505         if (tool->tracing_data == NULL)
506                 tool->tracing_data = process_event_synth_tracing_data_stub;
507         if (tool->build_id == NULL)
508                 tool->build_id = process_event_op2_stub;
509         if (tool->finished_round == NULL) {
510                 if (tool->ordered_events)
511                         tool->finished_round = process_finished_round;
512                 else
513                         tool->finished_round = process_finished_round_stub;
514         }
515         if (tool->id_index == NULL)
516                 tool->id_index = process_event_op2_stub;
517         if (tool->auxtrace_info == NULL)
518                 tool->auxtrace_info = process_event_op2_stub;
519         if (tool->auxtrace == NULL)
520                 tool->auxtrace = process_event_auxtrace_stub;
521         if (tool->auxtrace_error == NULL)
522                 tool->auxtrace_error = process_event_op2_stub;
523         if (tool->thread_map == NULL)
524                 tool->thread_map = process_event_thread_map_stub;
525         if (tool->cpu_map == NULL)
526                 tool->cpu_map = process_event_cpu_map_stub;
527         if (tool->stat_config == NULL)
528                 tool->stat_config = process_event_stat_config_stub;
529         if (tool->stat == NULL)
530                 tool->stat = process_stat_stub;
531         if (tool->stat_round == NULL)
532                 tool->stat_round = process_stat_round_stub;
533         if (tool->time_conv == NULL)
534                 tool->time_conv = process_event_op2_stub;
535         if (tool->feature == NULL)
536                 tool->feature = process_event_op2_stub;
537         if (tool->compressed == NULL)
538                 tool->compressed = perf_session__process_compressed_event;
539 }
540
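/*
 * Byte-swap the trailing sample_id_all block: 'data' points just past the
 * record-specific payload, and everything from there to the end of the
 * event is u64-aligned id sample data.
 */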
541 static void swap_sample_id_all(union perf_event *event, void *data)
542 {
543         void *end = (void *) event + event->header.size;
544         int size = end - data;
545
546         BUG_ON(size % sizeof(u64));
547         mem_bswap_64(data, size);
548 }
549
550 static void perf_event__all64_swap(union perf_event *event,
551                                    bool sample_id_all __maybe_unused)
552 {
553         struct perf_event_header *hdr = &event->header;
554         mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
555 }
556
557 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
558 {
559         event->comm.pid = bswap_32(event->comm.pid);
560         event->comm.tid = bswap_32(event->comm.tid);
561
562         if (sample_id_all) {
563                 void *data = &event->comm.comm;
564
565                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
566                 swap_sample_id_all(event, data);
567         }
568 }
569
570 static void perf_event__mmap_swap(union perf_event *event,
571                                   bool sample_id_all)
572 {
573         event->mmap.pid   = bswap_32(event->mmap.pid);
574         event->mmap.tid   = bswap_32(event->mmap.tid);
575         event->mmap.start = bswap_64(event->mmap.start);
576         event->mmap.len   = bswap_64(event->mmap.len);
577         event->mmap.pgoff = bswap_64(event->mmap.pgoff);
578
579         if (sample_id_all) {
580                 void *data = &event->mmap.filename;
581
582                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
583                 swap_sample_id_all(event, data);
584         }
585 }
586
587 static void perf_event__mmap2_swap(union perf_event *event,
588                                   bool sample_id_all)
589 {
590         event->mmap2.pid   = bswap_32(event->mmap2.pid);
591         event->mmap2.tid   = bswap_32(event->mmap2.tid);
592         event->mmap2.start = bswap_64(event->mmap2.start);
593         event->mmap2.len   = bswap_64(event->mmap2.len);
594         event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
595         event->mmap2.maj   = bswap_32(event->mmap2.maj);
596         event->mmap2.min   = bswap_32(event->mmap2.min);
597         event->mmap2.ino   = bswap_64(event->mmap2.ino);
598         event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
599
600         if (sample_id_all) {
601                 void *data = &event->mmap2.filename;
602
603                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
604                 swap_sample_id_all(event, data);
605         }
606 }
607 static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
608 {
609         event->fork.pid  = bswap_32(event->fork.pid);
610         event->fork.tid  = bswap_32(event->fork.tid);
611         event->fork.ppid = bswap_32(event->fork.ppid);
612         event->fork.ptid = bswap_32(event->fork.ptid);
613         event->fork.time = bswap_64(event->fork.time);
614
615         if (sample_id_all)
616                 swap_sample_id_all(event, &event->fork + 1);
617 }
618
619 static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
620 {
621         event->read.pid          = bswap_32(event->read.pid);
622         event->read.tid          = bswap_32(event->read.tid);
623         event->read.value        = bswap_64(event->read.value);
624         event->read.time_enabled = bswap_64(event->read.time_enabled);
625         event->read.time_running = bswap_64(event->read.time_running);
626         event->read.id           = bswap_64(event->read.id);
627
628         if (sample_id_all)
629                 swap_sample_id_all(event, &event->read + 1);
630 }
631
632 static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
633 {
634         event->aux.aux_offset = bswap_64(event->aux.aux_offset);
635         event->aux.aux_size   = bswap_64(event->aux.aux_size);
636         event->aux.flags      = bswap_64(event->aux.flags);
637
638         if (sample_id_all)
639                 swap_sample_id_all(event, &event->aux + 1);
640 }
641
642 static void perf_event__itrace_start_swap(union perf_event *event,
643                                           bool sample_id_all)
644 {
645         event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
646         event->itrace_start.tid  = bswap_32(event->itrace_start.tid);
647
648         if (sample_id_all)
649                 swap_sample_id_all(event, &event->itrace_start + 1);
650 }
651
652 static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
653 {
654         if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
655                 event->context_switch.next_prev_pid =
656                                 bswap_32(event->context_switch.next_prev_pid);
657                 event->context_switch.next_prev_tid =
658                                 bswap_32(event->context_switch.next_prev_tid);
659         }
660
661         if (sample_id_all)
662                 swap_sample_id_all(event, &event->context_switch + 1);
663 }
664
665 static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
666 {
667         event->text_poke.addr    = bswap_64(event->text_poke.addr);
668         event->text_poke.old_len = bswap_16(event->text_poke.old_len);
669         event->text_poke.new_len = bswap_16(event->text_poke.new_len);
670
671         if (sample_id_all) {
672                 size_t len = sizeof(event->text_poke.old_len) +
673                              sizeof(event->text_poke.new_len) +
674                              event->text_poke.old_len +
675                              event->text_poke.new_len;
676                 void *data = &event->text_poke.old_len;
677
678                 data += PERF_ALIGN(len, sizeof(u64));
679                 swap_sample_id_all(event, data);
680         }
681 }
682
683 static void perf_event__throttle_swap(union perf_event *event,
684                                       bool sample_id_all)
685 {
686         event->throttle.time      = bswap_64(event->throttle.time);
687         event->throttle.id        = bswap_64(event->throttle.id);
688         event->throttle.stream_id = bswap_64(event->throttle.stream_id);
689
690         if (sample_id_all)
691                 swap_sample_id_all(event, &event->throttle + 1);
692 }
693
694 static void perf_event__namespaces_swap(union perf_event *event,
695                                         bool sample_id_all)
696 {
697         u64 i;
698
699         event->namespaces.pid           = bswap_32(event->namespaces.pid);
700         event->namespaces.tid           = bswap_32(event->namespaces.tid);
701         event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);
702
703         for (i = 0; i < event->namespaces.nr_namespaces; i++) {
704                 struct perf_ns_link_info *ns = &event->namespaces.link_info[i];
705
706                 ns->dev = bswap_64(ns->dev);
707                 ns->ino = bswap_64(ns->ino);
708         }
709
710         if (sample_id_all)
711                 swap_sample_id_all(event, &event->namespaces.link_info[i]);
712 }
713
714 static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
715 {
716         event->cgroup.id = bswap_64(event->cgroup.id);
717
718         if (sample_id_all) {
719                 void *data = &event->cgroup.path;
720
721                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
722                 swap_sample_id_all(event, data);
723         }
724 }
725
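/* Reverse the bit order of a byte: swap nibbles, then bit pairs, then adjacent bits. */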
726 static u8 revbyte(u8 b)
727 {
728         int rev = (b >> 4) | ((b & 0xf) << 4);
729         rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
730         rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
731         return (u8) rev;
732 }
733
734 /*
735  * XXX this is a hack in an attempt to carry the flags bitfield
736  * through endian village. The ABI says:
737  *
738  * Bit-fields are allocated from right to left (least to most significant)
739  * on little-endian implementations and from left to right (most to least
740  * significant) on big-endian implementations.
741  *
742  * The above seems to be byte specific, so we need to reverse each
743  * byte of the bitfield. 'Internet' also says this might be implementation
744  * specific and we probably need a proper fix: carry the perf_event_attr
745  * bitfield flags in a separate data file FEAT_ section. Though this seems
746  * to work for now.
747  */
748 static void swap_bitfield(u8 *p, unsigned len)
749 {
750         unsigned i;
751
752         for (i = 0; i < len; i++) {
753                 *p = revbyte(*p);
754                 p++;
755         }
756 }
757
758 /* exported for swapping attributes in file header */
759 void perf_event__attr_swap(struct perf_event_attr *attr)
760 {
761         attr->type              = bswap_32(attr->type);
762         attr->size              = bswap_32(attr->size);
763
764 #define bswap_safe(f, n)                                        \
765         (attr->size > (offsetof(struct perf_event_attr, f) +    \
766                        sizeof(attr->f) * (n)))
767 #define bswap_field(f, sz)                      \
768 do {                                            \
769         if (bswap_safe(f, 0))                   \
770                 attr->f = bswap_##sz(attr->f);  \
771 } while(0)
772 #define bswap_field_16(f) bswap_field(f, 16)
773 #define bswap_field_32(f) bswap_field(f, 32)
774 #define bswap_field_64(f) bswap_field(f, 64)
775
776         bswap_field_64(config);
777         bswap_field_64(sample_period);
778         bswap_field_64(sample_type);
779         bswap_field_64(read_format);
780         bswap_field_32(wakeup_events);
781         bswap_field_32(bp_type);
782         bswap_field_64(bp_addr);
783         bswap_field_64(bp_len);
784         bswap_field_64(branch_sample_type);
785         bswap_field_64(sample_regs_user);
786         bswap_field_32(sample_stack_user);
787         bswap_field_32(aux_watermark);
788         bswap_field_16(sample_max_stack);
789         bswap_field_32(aux_sample_size);
790
791         /*
792          * After read_format come the bitfields. Check read_format because
793          * we are unable to use offsetof on a bitfield.
794          */
795         if (bswap_safe(read_format, 1))
796                 swap_bitfield((u8 *) (&attr->read_format + 1),
797                               sizeof(u64));
798 #undef bswap_field_64
799 #undef bswap_field_32
800 #undef bswap_field
801 #undef bswap_safe
802 }
803
804 static void perf_event__hdr_attr_swap(union perf_event *event,
805                                       bool sample_id_all __maybe_unused)
806 {
807         size_t size;
808
809         perf_event__attr_swap(&event->attr.attr);
810
811         size = event->header.size;
812         size -= (void *)&event->attr.id - (void *)event;
813         mem_bswap_64(event->attr.id, size);
814 }
815
816 static void perf_event__event_update_swap(union perf_event *event,
817                                           bool sample_id_all __maybe_unused)
818 {
819         event->event_update.type = bswap_64(event->event_update.type);
820         event->event_update.id   = bswap_64(event->event_update.id);
821 }
822
823 static void perf_event__event_type_swap(union perf_event *event,
824                                         bool sample_id_all __maybe_unused)
825 {
826         event->event_type.event_type.event_id =
827                 bswap_64(event->event_type.event_type.event_id);
828 }
829
830 static void perf_event__tracing_data_swap(union perf_event *event,
831                                           bool sample_id_all __maybe_unused)
832 {
833         event->tracing_data.size = bswap_32(event->tracing_data.size);
834 }
835
836 static void perf_event__auxtrace_info_swap(union perf_event *event,
837                                            bool sample_id_all __maybe_unused)
838 {
839         size_t size;
840
841         event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
842
843         size = event->header.size;
844         size -= (void *)&event->auxtrace_info.priv - (void *)event;
845         mem_bswap_64(event->auxtrace_info.priv, size);
846 }
847
848 static void perf_event__auxtrace_swap(union perf_event *event,
849                                       bool sample_id_all __maybe_unused)
850 {
851         event->auxtrace.size      = bswap_64(event->auxtrace.size);
852         event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
853         event->auxtrace.reference = bswap_64(event->auxtrace.reference);
854         event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
855         event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
856         event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
857 }
858
859 static void perf_event__auxtrace_error_swap(union perf_event *event,
860                                             bool sample_id_all __maybe_unused)
861 {
862         event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
863         event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
864         event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
865         event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
866         event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
867         event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
868         event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
869         if (event->auxtrace_error.fmt)
870                 event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
871 }
872
873 static void perf_event__thread_map_swap(union perf_event *event,
874                                         bool sample_id_all __maybe_unused)
875 {
876         unsigned i;
877
878         event->thread_map.nr = bswap_64(event->thread_map.nr);
879
880         for (i = 0; i < event->thread_map.nr; i++)
881                 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
882 }
883
884 static void perf_event__cpu_map_swap(union perf_event *event,
885                                      bool sample_id_all __maybe_unused)
886 {
887         struct perf_record_cpu_map_data *data = &event->cpu_map.data;
888         struct cpu_map_entries *cpus;
889         struct perf_record_record_cpu_map *mask;
890         unsigned i;
891
892         data->type = bswap_64(data->type);
893
894         switch (data->type) {
895         case PERF_CPU_MAP__CPUS:
896                 cpus = (struct cpu_map_entries *)data->data;
897
898                 cpus->nr = bswap_16(cpus->nr);
899
900                 for (i = 0; i < cpus->nr; i++)
901                         cpus->cpu[i] = bswap_16(cpus->cpu[i]);
902                 break;
903         case PERF_CPU_MAP__MASK:
904                 mask = (struct perf_record_record_cpu_map *)data->data;
905
906                 mask->nr = bswap_16(mask->nr);
907                 mask->long_size = bswap_16(mask->long_size);
908
909                 switch (mask->long_size) {
910                 case 4: mem_bswap_32(&mask->mask, mask->nr); break;
911                 case 8: mem_bswap_64(&mask->mask, mask->nr); break;
912                 default:
913                         pr_err("cpu_map swap: unsupported long size\n");
914                 }
915         default:
916                 break;
917         }
918 }
919
920 static void perf_event__stat_config_swap(union perf_event *event,
921                                          bool sample_id_all __maybe_unused)
922 {
923         u64 size;
924
925         size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
926         size += 1; /* nr item itself */
927         mem_bswap_64(&event->stat_config.nr, size);
928 }
929
930 static void perf_event__stat_swap(union perf_event *event,
931                                   bool sample_id_all __maybe_unused)
932 {
933         event->stat.id     = bswap_64(event->stat.id);
934         event->stat.thread = bswap_32(event->stat.thread);
935         event->stat.cpu    = bswap_32(event->stat.cpu);
936         event->stat.val    = bswap_64(event->stat.val);
937         event->stat.ena    = bswap_64(event->stat.ena);
938         event->stat.run    = bswap_64(event->stat.run);
939 }
940
941 static void perf_event__stat_round_swap(union perf_event *event,
942                                         bool sample_id_all __maybe_unused)
943 {
944         event->stat_round.type = bswap_64(event->stat_round.type);
945         event->stat_round.time = bswap_64(event->stat_round.time);
946 }
947
948 static void perf_event__time_conv_swap(union perf_event *event,
949                                        bool sample_id_all __maybe_unused)
950 {
951         event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
952         event->time_conv.time_mult  = bswap_64(event->time_conv.time_mult);
953         event->time_conv.time_zero  = bswap_64(event->time_conv.time_zero);
954
955         if (event_contains(event->time_conv, time_cycles)) {
956                 event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
957                 event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
958         }
959 }
960
961 typedef void (*perf_event__swap_op)(union perf_event *event,
962                                     bool sample_id_all);
963
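/*
 * Per-record-type byte-swap handlers, indexed by PERF_RECORD_* type; a NULL
 * entry means no swap handler is installed for that type.
 */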
964 static perf_event__swap_op perf_event__swap_ops[] = {
965         [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
966         [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
967         [PERF_RECORD_COMM]                = perf_event__comm_swap,
968         [PERF_RECORD_FORK]                = perf_event__task_swap,
969         [PERF_RECORD_EXIT]                = perf_event__task_swap,
970         [PERF_RECORD_LOST]                = perf_event__all64_swap,
971         [PERF_RECORD_READ]                = perf_event__read_swap,
972         [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
973         [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
974         [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
975         [PERF_RECORD_AUX]                 = perf_event__aux_swap,
976         [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
977         [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
978         [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
979         [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
980         [PERF_RECORD_NAMESPACES]          = perf_event__namespaces_swap,
981         [PERF_RECORD_CGROUP]              = perf_event__cgroup_swap,
982         [PERF_RECORD_TEXT_POKE]           = perf_event__text_poke_swap,
983         [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
984         [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
985         [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
986         [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
987         [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
988         [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
989         [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
990         [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
991         [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
992         [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
993         [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
994         [PERF_RECORD_STAT]                = perf_event__stat_swap,
995         [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
996         [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
997         [PERF_RECORD_TIME_CONV]           = perf_event__time_conv_swap,
998         [PERF_RECORD_HEADER_MAX]          = NULL,
999 };
1000
1001 /*
1002  * When perf record finishes a pass on all buffers, it records this pseudo
1003  * event.
1004  * We record the max timestamp t found in pass n.
1005  * Assuming these timestamps are monotonic across cpus, we know that if
1006  * a buffer still has events with timestamps below t, they will all be
1007  * available and read in pass n + 1.
1008  * Hence when we start to read pass n + 2, we can safely flush all
1009  * events with timestamps below t.
1010  *
1011  *    ============ PASS n =================
1012  *       CPU 0         |   CPU 1
1013  *                     |
1014  *    cnt1 timestamps  |   cnt2 timestamps
1015  *          1          |         2
1016  *          2          |         3
1017  *          -          |         4  <--- max recorded
1018  *
1019  *    ============ PASS n + 1 ==============
1020  *       CPU 0         |   CPU 1
1021  *                     |
1022  *    cnt1 timestamps  |   cnt2 timestamps
1023  *          3          |         5
1024  *          4          |         6
1025  *          5          |         7 <---- max recorded
1026  *
1027  *      Flush all events below timestamp 4
1028  *
1029  *    ============ PASS n + 2 ==============
1030  *       CPU 0         |   CPU 1
1031  *                     |
1032  *    cnt1 timestamps  |   cnt2 timestamps
1033  *          6          |         8
1034  *          7          |         9
1035  *          -          |         10
1036  *
1037  *      Flush all events below timestamp 7
1038  *      etc...
1039  */
1040 static int process_finished_round(struct perf_tool *tool __maybe_unused,
1041                                   union perf_event *event __maybe_unused,
1042                                   struct ordered_events *oe)
1043 {
1044         if (dump_trace)
1045                 fprintf(stdout, "\n");
1046         return ordered_events__flush(oe, OE_FLUSH__ROUND);
1047 }
1048
1049 int perf_session__queue_event(struct perf_session *s, union perf_event *event,
1050                               u64 timestamp, u64 file_offset)
1051 {
1052         return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
1053 }
1054
1055 static void callchain__lbr_callstack_printf(struct perf_sample *sample)
1056 {
1057         struct ip_callchain *callchain = sample->callchain;
1058         struct branch_stack *lbr_stack = sample->branch_stack;
1059         struct branch_entry *entries = perf_sample__branch_entries(sample);
1060         u64 kernel_callchain_nr = callchain->nr;
1061         unsigned int i;
1062
1063         for (i = 0; i < kernel_callchain_nr; i++) {
1064                 if (callchain->ips[i] == PERF_CONTEXT_USER)
1065                         break;
1066         }
1067
1068         if ((i != kernel_callchain_nr) && lbr_stack->nr) {
1069                 u64 total_nr;
1070                 /*
1071                  * The LBR callstack can only capture the user call chain;
1072                  * i is the number of kernel call chain entries, and
1073                  * 1 accounts for PERF_CONTEXT_USER.
1074                  *
1075                  * The user call chain is stored in LBR registers.
1076                  * LBR registers come in pairs: the caller is stored
1077                  * in the "from" register, while the callee is stored
1078                  * in the "to" register.
1079                  * For example, given the call stack
1080                  * "A"->"B"->"C"->"D",
1081                  * the LBR registers will record
1082                  * "C"->"D", "B"->"C", "A"->"B".
1083                  * So only the first "to" register and all "from"
1084                  * registers are needed to construct the whole stack.
1085                  */
1086                 total_nr = i + 1 + lbr_stack->nr + 1;
1087                 kernel_callchain_nr = i + 1;
1088
1089                 printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);
1090
1091                 for (i = 0; i < kernel_callchain_nr; i++)
1092                         printf("..... %2d: %016" PRIx64 "\n",
1093                                i, callchain->ips[i]);
1094
1095                 printf("..... %2d: %016" PRIx64 "\n",
1096                        (int)(kernel_callchain_nr), entries[0].to);
1097                 for (i = 0; i < lbr_stack->nr; i++)
1098                         printf("..... %2d: %016" PRIx64 "\n",
1099                                (int)(i + kernel_callchain_nr + 1), entries[i].from);
1100         }
1101 }
1102
1103 static void callchain__printf(struct evsel *evsel,
1104                               struct perf_sample *sample)
1105 {
1106         unsigned int i;
1107         struct ip_callchain *callchain = sample->callchain;
1108
1109         if (evsel__has_branch_callstack(evsel))
1110                 callchain__lbr_callstack_printf(sample);
1111
1112         printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
1113
1114         for (i = 0; i < callchain->nr; i++)
1115                 printf("..... %2d: %016" PRIx64 "\n",
1116                        i, callchain->ips[i]);
1117 }
1118
1119 static void branch_stack__printf(struct perf_sample *sample, bool callstack)
1120 {
1121         struct branch_entry *entries = perf_sample__branch_entries(sample);
1122         uint64_t i;
1123
1124         printf("%s: nr:%" PRIu64 "\n",
1125                 !callstack ? "... branch stack" : "... branch callstack",
1126                 sample->branch_stack->nr);
1127
1128         for (i = 0; i < sample->branch_stack->nr; i++) {
1129                 struct branch_entry *e = &entries[i];
1130
1131                 if (!callstack) {
1132                         printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
1133                                 i, e->from, e->to,
1134                                 (unsigned short)e->flags.cycles,
1135                                 e->flags.mispred ? "M" : " ",
1136                                 e->flags.predicted ? "P" : " ",
1137                                 e->flags.abort ? "A" : " ",
1138                                 e->flags.in_tx ? "T" : " ",
1139                                 (unsigned)e->flags.reserved);
1140                 } else {
1141                         printf("..... %2"PRIu64": %016" PRIx64 "\n",
1142                                 i, i > 0 ? e->from : e->to);
1143                 }
1144         }
1145 }
1146
1147 static void regs_dump__printf(u64 mask, u64 *regs)
1148 {
1149         unsigned rid, i = 0;
1150
1151         for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
1152                 u64 val = regs[i++];
1153
1154                 printf(".... %-5s 0x%016" PRIx64 "\n",
1155                        perf_reg_name(rid), val);
1156         }
1157 }
1158
1159 static const char *regs_abi[] = {
1160         [PERF_SAMPLE_REGS_ABI_NONE] = "none",
1161         [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
1162         [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
1163 };
1164
1165 static inline const char *regs_dump_abi(struct regs_dump *d)
1166 {
1167         if (d->abi > PERF_SAMPLE_REGS_ABI_64)
1168                 return "unknown";
1169
1170         return regs_abi[d->abi];
1171 }
1172
1173 static void regs__printf(const char *type, struct regs_dump *regs)
1174 {
1175         u64 mask = regs->mask;
1176
1177         printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
1178                type,
1179                mask,
1180                regs_dump_abi(regs));
1181
1182         regs_dump__printf(mask, regs->regs);
1183 }
1184
1185 static void regs_user__printf(struct perf_sample *sample)
1186 {
1187         struct regs_dump *user_regs = &sample->user_regs;
1188
1189         if (user_regs->regs)
1190                 regs__printf("user", user_regs);
1191 }
1192
1193 static void regs_intr__printf(struct perf_sample *sample)
1194 {
1195         struct regs_dump *intr_regs = &sample->intr_regs;
1196
1197         if (intr_regs->regs)
1198                 regs__printf("intr", intr_regs);
1199 }
1200
1201 static void stack_user__printf(struct stack_dump *dump)
1202 {
1203         printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
1204                dump->size, dump->offset);
1205 }
1206
1207 static void perf_evlist__print_tstamp(struct evlist *evlist,
1208                                        union perf_event *event,
1209                                        struct perf_sample *sample)
1210 {
1211         u64 sample_type = __evlist__combined_sample_type(evlist);
1212
1213         if (event->header.type != PERF_RECORD_SAMPLE &&
1214             !evlist__sample_id_all(evlist)) {
1215                 fputs("-1 -1 ", stdout);
1216                 return;
1217         }
1218
1219         if ((sample_type & PERF_SAMPLE_CPU))
1220                 printf("%u ", sample->cpu);
1221
1222         if (sample_type & PERF_SAMPLE_TIME)
1223                 printf("%" PRIu64 " ", sample->time);
1224 }
1225
1226 static void sample_read__printf(struct perf_sample *sample, u64 read_format)
1227 {
1228         printf("... sample_read:\n");
1229
1230         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1231                 printf("...... time enabled %016" PRIx64 "\n",
1232                        sample->read.time_enabled);
1233
1234         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1235                 printf("...... time running %016" PRIx64 "\n",
1236                        sample->read.time_running);
1237
1238         if (read_format & PERF_FORMAT_GROUP) {
1239                 u64 i;
1240
1241                 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
1242
1243                 for (i = 0; i < sample->read.group.nr; i++) {
1244                         struct sample_read_value *value;
1245
1246                         value = &sample->read.group.values[i];
1247                         printf("..... id %016" PRIx64
1248                                ", value %016" PRIx64 "\n",
1249                                value->id, value->value);
1250                 }
1251         } else
1252                 printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
1253                         sample->read.one.id, sample->read.one.value);
1254 }
1255
1256 static void dump_event(struct evlist *evlist, union perf_event *event,
1257                        u64 file_offset, struct perf_sample *sample)
1258 {
1259         if (!dump_trace)
1260                 return;
1261
1262         printf("\n%#" PRIx64 " [%#x]: event: %d\n",
1263                file_offset, event->header.size, event->header.type);
1264
1265         trace_event(event);
1266         if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
1267                 evlist->trace_event_sample_raw(evlist, event, sample);
1268
1269         if (sample)
1270                 perf_evlist__print_tstamp(evlist, event, sample);
1271
1272         printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
1273                event->header.size, perf_event__name(event->header.type));
1274 }
1275
1276 static void dump_sample(struct evsel *evsel, union perf_event *event,
1277                         struct perf_sample *sample)
1278 {
1279         u64 sample_type;
1280
1281         if (!dump_trace)
1282                 return;
1283
1284         printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
1285                event->header.misc, sample->pid, sample->tid, sample->ip,
1286                sample->period, sample->addr);
1287
1288         sample_type = evsel->core.attr.sample_type;
1289
1290         if (evsel__has_callchain(evsel))
1291                 callchain__printf(evsel, sample);
1292
1293         if (evsel__has_br_stack(evsel))
1294                 branch_stack__printf(sample, evsel__has_branch_callstack(evsel));
1295
1296         if (sample_type & PERF_SAMPLE_REGS_USER)
1297                 regs_user__printf(sample);
1298
1299         if (sample_type & PERF_SAMPLE_REGS_INTR)
1300                 regs_intr__printf(sample);
1301
1302         if (sample_type & PERF_SAMPLE_STACK_USER)
1303                 stack_user__printf(&sample->user_stack);
1304
1305         if (sample_type & PERF_SAMPLE_WEIGHT)
1306                 printf("... weight: %" PRIu64 "\n", sample->weight);
1307
1308         if (sample_type & PERF_SAMPLE_DATA_SRC)
1309                 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
1310
1311         if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1312                 printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);
1313
1314         if (sample_type & PERF_SAMPLE_TRANSACTION)
1315                 printf("... transaction: %" PRIx64 "\n", sample->transaction);
1316
1317         if (sample_type & PERF_SAMPLE_READ)
1318                 sample_read__printf(sample, evsel->core.attr.read_format);
1319 }
1320
1321 static void dump_read(struct evsel *evsel, union perf_event *event)
1322 {
1323         struct perf_record_read *read_event = &event->read;
1324         u64 read_format;
1325
1326         if (!dump_trace)
1327                 return;
1328
1329         printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
1330                evsel__name(evsel), event->read.value);
1331
1332         if (!evsel)
1333                 return;
1334
1335         read_format = evsel->core.attr.read_format;
1336
1337         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1338                 printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);
1339
1340         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1341                 printf("... time running : %" PRI_lu64 "\n", read_event->time_running);
1342
1343         if (read_format & PERF_FORMAT_ID)
1344                 printf("... id           : %" PRI_lu64 "\n", read_event->id);
1345 }
1346
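/*
 * Pick the machine an event belongs to: guest samples are routed to the
 * guest machine matching the pid (or the default guest machine), everything
 * else goes to the host machine.
 */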
1347 static struct machine *machines__find_for_cpumode(struct machines *machines,
1348                                                union perf_event *event,
1349                                                struct perf_sample *sample)
1350 {
1351         struct machine *machine;
1352
1353         if (perf_guest &&
1354             ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
1355              (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
1356                 u32 pid;
1357
1358                 if (event->header.type == PERF_RECORD_MMAP
1359                     || event->header.type == PERF_RECORD_MMAP2)
1360                         pid = event->mmap.pid;
1361                 else
1362                         pid = sample->pid;
1363
1364                 machine = machines__find(machines, pid);
1365                 if (!machine)
1366                         machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
1367                 return machine;
1368         }
1369
1370         return &machines->host;
1371 }
1372
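/*
 * Deliver one PERF_SAMPLE_READ value to the tool: the period passed on is
 * the delta between this value and the one previously seen for the same
 * sample id, and samples with an unknown id or a zero period are dropped.
 */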
1373 static int deliver_sample_value(struct evlist *evlist,
1374                                 struct perf_tool *tool,
1375                                 union perf_event *event,
1376                                 struct perf_sample *sample,
1377                                 struct sample_read_value *v,
1378                                 struct machine *machine)
1379 {
1380         struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
1381         struct evsel *evsel;
1382
1383         if (sid) {
1384                 sample->id     = v->id;
1385                 sample->period = v->value - sid->period;
1386                 sid->period    = v->value;
1387         }
1388
1389         if (!sid || sid->evsel == NULL) {
1390                 ++evlist->stats.nr_unknown_id;
1391                 return 0;
1392         }
1393
1394         /*
1395          * There's no reason to deliver a sample
1396          * with a zero period, bail out.
1397          */
1398         if (!sample->period)
1399                 return 0;
1400
1401         evsel = container_of(sid->evsel, struct evsel, core);
1402         return tool->sample(tool, event, sample, evsel, machine);
1403 }
1404
1405 static int deliver_sample_group(struct evlist *evlist,
1406                                 struct perf_tool *tool,
1407                                 union perf_event *event,
1408                                 struct perf_sample *sample,
1409                                 struct machine *machine)
1410 {
1411         int ret = -EINVAL;
1412         u64 i;
1413
1414         for (i = 0; i < sample->read.group.nr; i++) {
1415                 ret = deliver_sample_value(evlist, tool, event, sample,
1416                                            &sample->read.group.values[i],
1417                                            machine);
1418                 if (ret)
1419                         break;
1420         }
1421
1422         return ret;
1423 }
1424
1425 static int
1426 perf_evlist__deliver_sample(struct evlist *evlist,
1427                             struct perf_tool *tool,
1428                             union perf_event *event,
1429                             struct perf_sample *sample,
1430                             struct evsel *evsel,
1431                             struct machine *machine)
1432 {
1433         /* We know evsel != NULL. */
1434         u64 sample_type = evsel->core.attr.sample_type;
1435         u64 read_format = evsel->core.attr.read_format;
1436
1437         /* Standard sample delivery. */
1438         if (!(sample_type & PERF_SAMPLE_READ))
1439                 return tool->sample(tool, event, sample, evsel, machine);
1440
1441         /* For PERF_SAMPLE_READ we have either single or group mode. */
1442         if (read_format & PERF_FORMAT_GROUP)
1443                 return deliver_sample_group(evlist, tool, event, sample,
1444                                             machine);
1445         else
1446                 return deliver_sample_value(evlist, tool, event, sample,
1447                                             &sample->read.one, machine);
1448 }
1449
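/*
 * Dispatch a kernel-generated event to the matching perf_tool callback,
 * updating evlist statistics (lost events/samples, truncated or partial
 * AUX data, unknown events) along the way.
 */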
1450 static int machines__deliver_event(struct machines *machines,
1451                                    struct evlist *evlist,
1452                                    union perf_event *event,
1453                                    struct perf_sample *sample,
1454                                    struct perf_tool *tool, u64 file_offset)
1455 {
1456         struct evsel *evsel;
1457         struct machine *machine;
1458
1459         dump_event(evlist, event, file_offset, sample);
1460
1461         evsel = perf_evlist__id2evsel(evlist, sample->id);
1462
1463         machine = machines__find_for_cpumode(machines, event, sample);
1464
1465         switch (event->header.type) {
1466         case PERF_RECORD_SAMPLE:
1467                 if (evsel == NULL) {
1468                         ++evlist->stats.nr_unknown_id;
1469                         return 0;
1470                 }
1471                 dump_sample(evsel, event, sample);
1472                 if (machine == NULL) {
1473                         ++evlist->stats.nr_unprocessable_samples;
1474                         return 0;
1475                 }
1476                 return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
1477         case PERF_RECORD_MMAP:
1478                 return tool->mmap(tool, event, sample, machine);
1479         case PERF_RECORD_MMAP2:
1480                 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1481                         ++evlist->stats.nr_proc_map_timeout;
1482                 return tool->mmap2(tool, event, sample, machine);
1483         case PERF_RECORD_COMM:
1484                 return tool->comm(tool, event, sample, machine);
1485         case PERF_RECORD_NAMESPACES:
1486                 return tool->namespaces(tool, event, sample, machine);
1487         case PERF_RECORD_CGROUP:
1488                 return tool->cgroup(tool, event, sample, machine);
1489         case PERF_RECORD_FORK:
1490                 return tool->fork(tool, event, sample, machine);
1491         case PERF_RECORD_EXIT:
1492                 return tool->exit(tool, event, sample, machine);
1493         case PERF_RECORD_LOST:
1494                 if (tool->lost == perf_event__process_lost)
1495                         evlist->stats.total_lost += event->lost.lost;
1496                 return tool->lost(tool, event, sample, machine);
1497         case PERF_RECORD_LOST_SAMPLES:
1498                 if (tool->lost_samples == perf_event__process_lost_samples)
1499                         evlist->stats.total_lost_samples += event->lost_samples.lost;
1500                 return tool->lost_samples(tool, event, sample, machine);
1501         case PERF_RECORD_READ:
1502                 dump_read(evsel, event);
1503                 return tool->read(tool, event, sample, evsel, machine);
1504         case PERF_RECORD_THROTTLE:
1505                 return tool->throttle(tool, event, sample, machine);
1506         case PERF_RECORD_UNTHROTTLE:
1507                 return tool->unthrottle(tool, event, sample, machine);
1508         case PERF_RECORD_AUX:
1509                 if (tool->aux == perf_event__process_aux) {
1510                         if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1511                                 evlist->stats.total_aux_lost += 1;
1512                         if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1513                                 evlist->stats.total_aux_partial += 1;
1514                 }
1515                 return tool->aux(tool, event, sample, machine);
1516         case PERF_RECORD_ITRACE_START:
1517                 return tool->itrace_start(tool, event, sample, machine);
1518         case PERF_RECORD_SWITCH:
1519         case PERF_RECORD_SWITCH_CPU_WIDE:
1520                 return tool->context_switch(tool, event, sample, machine);
1521         case PERF_RECORD_KSYMBOL:
1522                 return tool->ksymbol(tool, event, sample, machine);
1523         case PERF_RECORD_BPF_EVENT:
1524                 return tool->bpf(tool, event, sample, machine);
1525         case PERF_RECORD_TEXT_POKE:
1526                 return tool->text_poke(tool, event, sample, machine);
1527         default:
1528                 ++evlist->stats.nr_unknown_events;
1529                 return -1;
1530         }
1531 }
1532
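/*
 * Parse the event's sample data, let the AUX-area trace code look at the
 * event first (a positive return means it consumed it), and otherwise
 * deliver it via machines__deliver_event().
 */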
1533 static int perf_session__deliver_event(struct perf_session *session,
1534                                        union perf_event *event,
1535                                        struct perf_tool *tool,
1536                                        u64 file_offset)
1537 {
1538         struct perf_sample sample;
1539         int ret;
1540
1541         ret = perf_evlist__parse_sample(session->evlist, event, &sample);
1542         if (ret) {
1543                 pr_err("Can't parse sample, err = %d\n", ret);
1544                 return ret;
1545         }
1546
1547         ret = auxtrace__process_event(session, event, &sample, tool);
1548         if (ret < 0)
1549                 return ret;
1550         if (ret > 0)
1551                 return 0;
1552
1553         ret = machines__deliver_event(&session->machines, session->evlist,
1554                                       event, &sample, tool, file_offset);
1555
1556         if (dump_trace && sample.aux_sample.size)
1557                 auxtrace__dump_auxtrace_sample(session, &sample);
1558
1559         return ret;
1560 }
1561
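/*
 * Synthesized "user" events (attr, event updates, tracing data, build ids,
 * auxtrace, stat, feature records, ...) are never queued for ordering: each
 * is handed to its perf_tool callback right away.
 */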
1562 static s64 perf_session__process_user_event(struct perf_session *session,
1563                                             union perf_event *event,
1564                                             u64 file_offset)
1565 {
1566         struct ordered_events *oe = &session->ordered_events;
1567         struct perf_tool *tool = session->tool;
1568         struct perf_sample sample = { .time = 0, };
1569         int fd = perf_data__fd(session->data);
1570         int err;
1571
1572         if (event->header.type != PERF_RECORD_COMPRESSED ||
1573             tool->compressed == perf_session__process_compressed_event_stub)
1574                 dump_event(session->evlist, event, file_offset, &sample);
1575
1576         /* These events are processed right away */
1577         switch (event->header.type) {
1578         case PERF_RECORD_HEADER_ATTR:
1579                 err = tool->attr(tool, event, &session->evlist);
1580                 if (err == 0) {
1581                         perf_session__set_id_hdr_size(session);
1582                         perf_session__set_comm_exec(session);
1583                 }
1584                 return err;
1585         case PERF_RECORD_EVENT_UPDATE:
1586                 return tool->event_update(tool, event, &session->evlist);
1587         case PERF_RECORD_HEADER_EVENT_TYPE:
1588                 /*
1589                  * Deprecated, but we need to handle it for the sake
1590                  * of old data files created in pipe mode.
1591                  */
1592                 return 0;
1593         case PERF_RECORD_HEADER_TRACING_DATA:
1594                 /*
1595                  * Setup for reading amidst mmap, but only when we
1596                  * are in 'file' mode. The 'pipe' fd is already in
1597                  * the proper place.
1598                  */
1599                 if (!perf_data__is_pipe(session->data))
1600                         lseek(fd, file_offset, SEEK_SET);
1601                 return tool->tracing_data(session, event);
1602         case PERF_RECORD_HEADER_BUILD_ID:
1603                 return tool->build_id(session, event);
1604         case PERF_RECORD_FINISHED_ROUND:
1605                 return tool->finished_round(tool, event, oe);
1606         case PERF_RECORD_ID_INDEX:
1607                 return tool->id_index(session, event);
1608         case PERF_RECORD_AUXTRACE_INFO:
1609                 return tool->auxtrace_info(session, event);
1610         case PERF_RECORD_AUXTRACE:
1611                 /* setup for reading amidst mmap */
1612                 lseek(fd, file_offset + event->header.size, SEEK_SET);
1613                 return tool->auxtrace(session, event);
1614         case PERF_RECORD_AUXTRACE_ERROR:
1615                 perf_session__auxtrace_error_inc(session, event);
1616                 return tool->auxtrace_error(session, event);
1617         case PERF_RECORD_THREAD_MAP:
1618                 return tool->thread_map(session, event);
1619         case PERF_RECORD_CPU_MAP:
1620                 return tool->cpu_map(session, event);
1621         case PERF_RECORD_STAT_CONFIG:
1622                 return tool->stat_config(session, event);
1623         case PERF_RECORD_STAT:
1624                 return tool->stat(session, event);
1625         case PERF_RECORD_STAT_ROUND:
1626                 return tool->stat_round(session, event);
1627         case PERF_RECORD_TIME_CONV:
1628                 session->time_conv = event->time_conv;
1629                 return tool->time_conv(session, event);
1630         case PERF_RECORD_HEADER_FEATURE:
1631                 return tool->feature(session, event);
1632         case PERF_RECORD_COMPRESSED:
1633                 err = tool->compressed(session, event, file_offset);
1634                 if (err)
1635                         dump_event(session->evlist, event, file_offset, &sample);
1636                 return err;
1637         default:
1638                 return -EINVAL;
1639         }
1640 }
1641
1642 int perf_session__deliver_synth_event(struct perf_session *session,
1643                                       union perf_event *event,
1644                                       struct perf_sample *sample)
1645 {
1646         struct evlist *evlist = session->evlist;
1647         struct perf_tool *tool = session->tool;
1648
1649         events_stats__inc(&evlist->stats, event->header.type);
1650
1651         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1652                 return perf_session__process_user_event(session, event, 0);
1653
1654         return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
1655 }
1656
1657 static void event_swap(union perf_event *event, bool sample_id_all)
1658 {
1659         perf_event__swap_op swap;
1660
1661         swap = perf_event__swap_ops[event->header.type];
1662         if (swap)
1663                 swap(event, sample_id_all);
1664 }
1665
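/*
 * Read a single event at file_offset without disturbing normal processing:
 * either point straight into the single mmap of the data file, or lseek()
 * and read the event into the caller-supplied buffer, byte-swapping and
 * parsing the sample as needed.
 */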
1666 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1667                              void *buf, size_t buf_sz,
1668                              union perf_event **event_ptr,
1669                              struct perf_sample *sample)
1670 {
1671         union perf_event *event;
1672         size_t hdr_sz, rest;
1673         int fd;
1674
1675         if (session->one_mmap && !session->header.needs_swap) {
1676                 event = file_offset - session->one_mmap_offset +
1677                         session->one_mmap_addr;
1678                 goto out_parse_sample;
1679         }
1680
1681         if (perf_data__is_pipe(session->data))
1682                 return -1;
1683
1684         fd = perf_data__fd(session->data);
1685         hdr_sz = sizeof(struct perf_event_header);
1686
1687         if (buf_sz < hdr_sz)
1688                 return -1;
1689
1690         if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1691             readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1692                 return -1;
1693
1694         event = (union perf_event *)buf;
1695
1696         if (session->header.needs_swap)
1697                 perf_event_header__bswap(&event->header);
1698
1699         if (event->header.size < hdr_sz || event->header.size > buf_sz)
1700                 return -1;
1701
1702         buf += hdr_sz;
1703         rest = event->header.size - hdr_sz;
1704
1705         if (readn(fd, buf, rest) != (ssize_t)rest)
1706                 return -1;
1707
1708         if (session->header.needs_swap)
1709                 event_swap(event, evlist__sample_id_all(session->evlist));
1710
1711 out_parse_sample:
1712
1713         if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1714             perf_evlist__parse_sample(session->evlist, event, sample))
1715                 return -1;
1716
1717         *event_ptr = event;
1718
1719         return 0;
1720 }
1721
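/*
 * Walk all events in [offset, offset + size) and invoke cb on each one,
 * skipping the opaque payload that follows PERF_RECORD_AUXTRACE records.
 * A non-zero return from cb stops the walk and is propagated to the caller.
 * An illustrative (not in-tree) callback could look like:
 *
 *	static int count_events_cb(struct perf_session *session,
 *				   union perf_event *event,
 *				   u64 offset, void *data)
 *	{
 *		*(u64 *)data += 1;
 *		return 0;
 *	}
 */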
1722 int perf_session__peek_events(struct perf_session *session, u64 offset,
1723                               u64 size, peek_events_cb_t cb, void *data)
1724 {
1725         u64 max_offset = offset + size;
1726         char buf[PERF_SAMPLE_MAX_SIZE];
1727         union perf_event *event;
1728         int err;
1729
1730         do {
1731                 err = perf_session__peek_event(session, offset, buf,
1732                                                PERF_SAMPLE_MAX_SIZE, &event,
1733                                                NULL);
1734                 if (err)
1735                         return err;
1736
1737                 err = cb(session, event, offset, data);
1738                 if (err)
1739                         return err;
1740
1741                 offset += event->header.size;
1742                 if (event->header.type == PERF_RECORD_AUXTRACE)
1743                         offset += event->auxtrace.size;
1744
1745         } while (offset < max_offset);
1746
1747         return err;
1748 }
1749
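/*
 * Main entry point for one event from the data stream: byte-swap if needed,
 * bump the stats, route user events straight to their handlers and, when the
 * tool wants ordered events, queue kernel events by timestamp; events that
 * cannot be queued fall through to immediate delivery.
 */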
1750 static s64 perf_session__process_event(struct perf_session *session,
1751                                        union perf_event *event, u64 file_offset)
1752 {
1753         struct evlist *evlist = session->evlist;
1754         struct perf_tool *tool = session->tool;
1755         int ret;
1756
1757         if (session->header.needs_swap)
1758                 event_swap(event, evlist__sample_id_all(evlist));
1759
1760         if (event->header.type >= PERF_RECORD_HEADER_MAX)
1761                 return -EINVAL;
1762
1763         events_stats__inc(&evlist->stats, event->header.type);
1764
1765         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1766                 return perf_session__process_user_event(session, event, file_offset);
1767
1768         if (tool->ordered_events) {
1769                 u64 timestamp = -1ULL;
1770
1771                 ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
1772                 if (ret && ret != -1)
1773                         return ret;
1774
1775                 ret = perf_session__queue_event(session, event, timestamp, file_offset);
1776                 if (ret != -ETIME)
1777                         return ret;
1778         }
1779
1780         return perf_session__deliver_event(session, event, tool, file_offset);
1781 }
1782
1783 void perf_event_header__bswap(struct perf_event_header *hdr)
1784 {
1785         hdr->type = bswap_32(hdr->type);
1786         hdr->misc = bswap_16(hdr->misc);
1787         hdr->size = bswap_16(hdr->size);
1788 }
1789
1790 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1791 {
1792         return machine__findnew_thread(&session->machines.host, -1, pid);
1793 }
1794
1795 /*
1796  * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
1797  * A single thread is created here to represent it, but in reality there is a
1798  * separate idle task per cpu, so there should be one 'struct thread' per cpu.
1799  * Having only one causes problems for some tools, requiring workarounds, for
1800  * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
1801  */
1802 int perf_session__register_idle_thread(struct perf_session *session)
1803 {
1804         struct thread *thread;
1805         int err = 0;
1806
1807         thread = machine__findnew_thread(&session->machines.host, 0, 0);
1808         if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1809                 pr_err("problem inserting idle task.\n");
1810                 err = -1;
1811         }
1812
1813         if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1814                 pr_err("problem inserting idle task.\n");
1815                 err = -1;
1816         }
1817
1818         /* machine__findnew_thread() got the thread, so put it */
1819         thread__put(thread);
1820         return err;
1821 }
1822
1823 static void
1824 perf_session__warn_order(const struct perf_session *session)
1825 {
1826         const struct ordered_events *oe = &session->ordered_events;
1827         struct evsel *evsel;
1828         bool should_warn = true;
1829
1830         evlist__for_each_entry(session->evlist, evsel) {
1831                 if (evsel->core.attr.write_backward)
1832                         should_warn = false;
1833         }
1834
1835         if (!should_warn)
1836                 return;
1837         if (oe->nr_unordered_events != 0)
1838                 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1839 }
1840
1841 static void perf_session__warn_about_errors(const struct perf_session *session)
1842 {
1843         const struct events_stats *stats = &session->evlist->stats;
1844
1845         if (session->tool->lost == perf_event__process_lost &&
1846             stats->nr_events[PERF_RECORD_LOST] != 0) {
1847                 ui__warning("Processed %d events and lost %d chunks!\n\n"
1848                             "Check IO/CPU overload!\n\n",
1849                             stats->nr_events[0],
1850                             stats->nr_events[PERF_RECORD_LOST]);
1851         }
1852
1853         if (session->tool->lost_samples == perf_event__process_lost_samples) {
1854                 double drop_rate;
1855
1856                 drop_rate = (double)stats->total_lost_samples /
1857                             (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1858                 if (drop_rate > 0.05) {
1859                         ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1860                                     stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1861                                     drop_rate * 100.0);
1862                 }
1863         }
1864
1865         if (session->tool->aux == perf_event__process_aux &&
1866             stats->total_aux_lost != 0) {
1867                 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1868                             stats->total_aux_lost,
1869                             stats->nr_events[PERF_RECORD_AUX]);
1870         }
1871
1872         if (session->tool->aux == perf_event__process_aux &&
1873             stats->total_aux_partial != 0) {
1874                 bool vmm_exclusive = false;
1875
1876                 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1877                                        &vmm_exclusive);
1878
1879                 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1880                             "Are you running a KVM guest in the background?%s\n\n",
1881                             stats->total_aux_partial,
1882                             stats->nr_events[PERF_RECORD_AUX],
1883                             vmm_exclusive ?
1884                             "\nReloading kvm_intel module with vmm_exclusive=0\n"
1885                             "will reduce the gaps to only guest's timeslices." :
1886                             "");
1887         }
1888
1889         if (stats->nr_unknown_events != 0) {
1890                 ui__warning("Found %u unknown events!\n\n"
1891                             "Is this an older tool processing a perf.data "
1892                             "file generated by a more recent tool?\n\n"
1893                             "If that is not the case, consider "
1894                             "reporting to linux-kernel@vger.kernel.org.\n\n",
1895                             stats->nr_unknown_events);
1896         }
1897
1898         if (stats->nr_unknown_id != 0) {
1899                 ui__warning("%u samples with id not present in the header\n",
1900                             stats->nr_unknown_id);
1901         }
1902
1903         if (stats->nr_invalid_chains != 0) {
1904                 ui__warning("Found invalid callchains!\n\n"
1905                             "%u out of %u events were discarded for this reason.\n\n"
1906                             "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1907                             stats->nr_invalid_chains,
1908                             stats->nr_events[PERF_RECORD_SAMPLE]);
1909         }
1910
1911         if (stats->nr_unprocessable_samples != 0) {
1912                 ui__warning("%u unprocessable samples recorded.\n"
1913                             "Do you have a KVM guest running and not using 'perf kvm'?\n",
1914                             stats->nr_unprocessable_samples);
1915         }
1916
1917         perf_session__warn_order(session);
1918
1919         events_stats__auxtrace_error_warn(stats);
1920
1921         if (stats->nr_proc_map_timeout != 0) {
1922                 ui__warning("%d map information files for pre-existing threads were\n"
1923                             "not processed. If there are samples for those addresses,\n"
1924                             "they will not be resolved. You can find out which threads\n"
1925                             "these are by running with -v and redirecting the output\n"
1926                             "to a file.\n"
1927                             "The time limit for processing proc maps may be too short;\n"
1928                             "increase it with --proc-map-timeout.\n",
1929                             stats->nr_proc_map_timeout);
1930         }
1931 }
1932
1933 static int perf_session__flush_thread_stack(struct thread *thread,
1934                                             void *p __maybe_unused)
1935 {
1936         return thread_stack__flush(thread);
1937 }
1938
1939 static int perf_session__flush_thread_stacks(struct perf_session *session)
1940 {
1941         return machines__for_each_thread(&session->machines,
1942                                          perf_session__flush_thread_stack,
1943                                          NULL);
1944 }
1945
1946 volatile int session_done;
1947
1948 static int __perf_session__process_decomp_events(struct perf_session *session);
1949
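/*
 * Pipe mode: read one event header at a time from the fd, grow the buffer to
 * fit the event payload, process it, then drain any events produced by
 * decompression. A final flush of ordered events, AUX data and per-thread
 * call stacks happens when the stream ends.
 */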
1950 static int __perf_session__process_pipe_events(struct perf_session *session)
1951 {
1952         struct ordered_events *oe = &session->ordered_events;
1953         struct perf_tool *tool = session->tool;
1954         int fd = perf_data__fd(session->data);
1955         union perf_event *event;
1956         uint32_t size, cur_size = 0;
1957         void *buf = NULL;
1958         s64 skip = 0;
1959         u64 head;
1960         ssize_t err;
1961         void *p;
1962
1963         perf_tool__fill_defaults(tool);
1964
1965         head = 0;
1966         cur_size = sizeof(union perf_event);
1967
1968         buf = malloc(cur_size);
1969         if (!buf)
1970                 return -errno;
1971         ordered_events__set_copy_on_queue(oe, true);
1972 more:
1973         event = buf;
1974         err = readn(fd, event, sizeof(struct perf_event_header));
1975         if (err <= 0) {
1976                 if (err == 0)
1977                         goto done;
1978
1979                 pr_err("failed to read event header\n");
1980                 goto out_err;
1981         }
1982
1983         if (session->header.needs_swap)
1984                 perf_event_header__bswap(&event->header);
1985
1986         size = event->header.size;
1987         if (size < sizeof(struct perf_event_header)) {
1988                 pr_err("bad event header size\n");
1989                 goto out_err;
1990         }
1991
1992         if (size > cur_size) {
1993                 void *new = realloc(buf, size);
1994                 if (!new) {
1995                         pr_err("failed to allocate memory to read event\n");
1996                         goto out_err;
1997                 }
1998                 buf = new;
1999                 cur_size = size;
2000                 event = buf;
2001         }
2002         p = event;
2003         p += sizeof(struct perf_event_header);
2004
2005         if (size - sizeof(struct perf_event_header)) {
2006                 err = readn(fd, p, size - sizeof(struct perf_event_header));
2007                 if (err <= 0) {
2008                         if (err == 0) {
2009                                 pr_err("unexpected end of event stream\n");
2010                                 goto done;
2011                         }
2012
2013                         pr_err("failed to read event data\n");
2014                         goto out_err;
2015                 }
2016         }
2017
2018         if ((skip = perf_session__process_event(session, event, head)) < 0) {
2019                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2020                        head, event->header.size, event->header.type);
2021                 err = -EINVAL;
2022                 goto out_err;
2023         }
2024
2025         head += size;
2026
2027         if (skip > 0)
2028                 head += skip;
2029
2030         err = __perf_session__process_decomp_events(session);
2031         if (err)
2032                 goto out_err;
2033
2034         if (!session_done())
2035                 goto more;
2036 done:
2037         /* do the final flush for ordered samples */
2038         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2039         if (err)
2040                 goto out_err;
2041         err = auxtrace__flush_events(session, tool);
2042         if (err)
2043                 goto out_err;
2044         err = perf_session__flush_thread_stacks(session);
2045 out_err:
2046         free(buf);
2047         if (!tool->no_warn)
2048                 perf_session__warn_about_errors(session);
2049         ordered_events__free(&session->ordered_events);
2050         auxtrace__free_events(session);
2051         return err;
2052 }
2053
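/*
 * Check whether a complete event is available at buf + head. Returns the
 * event when it fits in the current mapping, NULL when the caller should
 * remap and retry, or the 'error' token when the recorded size cannot be
 * valid (fuzzed or compressed data).
 */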
2054 static union perf_event *
2055 prefetch_event(char *buf, u64 head, size_t mmap_size,
2056                bool needs_swap, union perf_event *error)
2057 {
2058         union perf_event *event;
2059         u16 event_size;
2060
2061         /*
2062          * Ensure we have enough space remaining to read
2063          * the size of the event in the headers.
2064          */
2065         if (head + sizeof(event->header) > mmap_size)
2066                 return NULL;
2067
2068         event = (union perf_event *)(buf + head);
2069         if (needs_swap)
2070                 perf_event_header__bswap(&event->header);
2071
2072         event_size = event->header.size;
2073         if (head + event_size <= mmap_size)
2074                 return event;
2075
2076         /* We're not fetching the event so swap back again */
2077         if (needs_swap)
2078                 perf_event_header__bswap(&event->header);
2079
2080         /* Check if the event fits into the next mmapped buf. */
2081         if (event_size <= mmap_size - head % page_size) {
2082                 /* Remap buf and fetch again. */
2083                 return NULL;
2084         }
2085
2086         /* Invalid input. Event size should never exceed mmap_size. */
2087         pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
2088                  " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size);
2089
2090         return error;
2091 }
2092
2093 static union perf_event *
2094 fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2095 {
2096         return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
2097 }
2098
2099 static union perf_event *
2100 fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2101 {
2102         return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
2103 }
2104
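/*
 * Drain the events accumulated in the most recent decompressed buffer
 * (session->decomp_last), feeding each one to perf_session__process_event().
 */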
2105 static int __perf_session__process_decomp_events(struct perf_session *session)
2106 {
2107         s64 skip;
2108         u64 size, file_pos = 0;
2109         struct decomp *decomp = session->decomp_last;
2110
2111         if (!decomp)
2112                 return 0;
2113
2114         while (decomp->head < decomp->size && !session_done()) {
2115                 union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
2116                                                              session->header.needs_swap);
2117
2118                 if (!event)
2119                         break;
2120
2121                 size = event->header.size;
2122
2123                 if (size < sizeof(struct perf_event_header) ||
2124                     (skip = perf_session__process_event(session, event, file_pos)) < 0) {
2125                         pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2126                                 decomp->file_pos + decomp->head, event->header.size, event->header.type);
2127                         return -EINVAL;
2128                 }
2129
2130                 if (skip)
2131                         size += skip;
2132
2133                 decomp->head += size;
2134         }
2135
2136         return 0;
2137 }
2138
2139 /*
2140  * On 64bit we can mmap the data file in one go. No need for tiny mmap
2141  * slices. On 32bit we use 32MB.
2142  */
2143 #if BITS_PER_LONG == 64
2144 #define MMAP_SIZE ULLONG_MAX
2145 #define NUM_MMAPS 1
2146 #else
2147 #define MMAP_SIZE (32 * 1024 * 1024ULL)
2148 #define NUM_MMAPS 128
2149 #endif
2150
2151 struct reader;
2152
2153 typedef s64 (*reader_cb_t)(struct perf_session *session,
2154                            union perf_event *event,
2155                            u64 file_offset);
2156
2157 struct reader {
2158         int              fd;
2159         u64              data_size;
2160         u64              data_offset;
2161         reader_cb_t      process;
2162 };
2163
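/*
 * mmap the data file in slices of mmap_size and iterate over the events,
 * unmapping and remapping whenever an event straddles the end of the current
 * slice, and draining decompressed events after each record.
 */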
2164 static int
2165 reader__process_events(struct reader *rd, struct perf_session *session,
2166                        struct ui_progress *prog)
2167 {
2168         u64 data_size = rd->data_size;
2169         u64 head, page_offset, file_offset, file_pos, size;
2170         int err = 0, mmap_prot, mmap_flags, map_idx = 0;
2171         size_t  mmap_size;
2172         char *buf, *mmaps[NUM_MMAPS];
2173         union perf_event *event;
2174         s64 skip;
2175
2176         page_offset = page_size * (rd->data_offset / page_size);
2177         file_offset = page_offset;
2178         head = rd->data_offset - page_offset;
2179
2180         ui_progress__init_size(prog, data_size, "Processing events...");
2181
2182         data_size += rd->data_offset;
2183
2184         mmap_size = MMAP_SIZE;
2185         if (mmap_size > data_size) {
2186                 mmap_size = data_size;
2187                 session->one_mmap = true;
2188         }
2189
2190         memset(mmaps, 0, sizeof(mmaps));
2191
2192         mmap_prot  = PROT_READ;
2193         mmap_flags = MAP_SHARED;
2194
2195         if (session->header.needs_swap) {
2196                 mmap_prot  |= PROT_WRITE;
2197                 mmap_flags = MAP_PRIVATE;
2198         }
2199 remap:
2200         buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
2201                    file_offset);
2202         if (buf == MAP_FAILED) {
2203                 pr_err("failed to mmap file\n");
2204                 err = -errno;
2205                 goto out;
2206         }
2207         mmaps[map_idx] = buf;
2208         map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
2209         file_pos = file_offset + head;
2210         if (session->one_mmap) {
2211                 session->one_mmap_addr = buf;
2212                 session->one_mmap_offset = file_offset;
2213         }
2214
2215 more:
2216         event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
2217         if (IS_ERR(event))
2218                 return PTR_ERR(event);
2219
2220         if (!event) {
2221                 if (mmaps[map_idx]) {
2222                         munmap(mmaps[map_idx], mmap_size);
2223                         mmaps[map_idx] = NULL;
2224                 }
2225
2226                 page_offset = page_size * (head / page_size);
2227                 file_offset += page_offset;
2228                 head -= page_offset;
2229                 goto remap;
2230         }
2231
2232         size = event->header.size;
2233
2234         skip = -EINVAL;
2235
2236         if (size < sizeof(struct perf_event_header) ||
2237             (skip = rd->process(session, event, file_pos)) < 0) {
2238                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
2239                        file_offset + head, event->header.size,
2240                        event->header.type, strerror(-skip));
2241                 err = skip;
2242                 goto out;
2243         }
2244
2245         if (skip)
2246                 size += skip;
2247
2248         head += size;
2249         file_pos += size;
2250
2251         err = __perf_session__process_decomp_events(session);
2252         if (err)
2253                 goto out;
2254
2255         ui_progress__update(prog, size);
2256
2257         if (session_done())
2258                 goto out;
2259
2260         if (file_pos < data_size)
2261                 goto more;
2262
2263 out:
2264         return err;
2265 }
2266
2267 static s64 process_simple(struct perf_session *session,
2268                           union perf_event *event,
2269                           u64 file_offset)
2270 {
2271         return perf_session__process_event(session, event, file_offset);
2272 }
2273
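/*
 * File (non-pipe) mode: run the reader over the data section, then do the
 * final flushes for ordered events, AUX traces and per-thread call stacks.
 */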
2274 static int __perf_session__process_events(struct perf_session *session)
2275 {
2276         struct reader rd = {
2277                 .fd             = perf_data__fd(session->data),
2278                 .data_size      = session->header.data_size,
2279                 .data_offset    = session->header.data_offset,
2280                 .process        = process_simple,
2281         };
2282         struct ordered_events *oe = &session->ordered_events;
2283         struct perf_tool *tool = session->tool;
2284         struct ui_progress prog;
2285         int err;
2286
2287         perf_tool__fill_defaults(tool);
2288
2289         if (rd.data_size == 0)
2290                 return -1;
2291
2292         ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2293
2294         err = reader__process_events(&rd, session, &prog);
2295         if (err)
2296                 goto out_err;
2297         /* do the final flush for ordered samples */
2298         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2299         if (err)
2300                 goto out_err;
2301         err = auxtrace__flush_events(session, tool);
2302         if (err)
2303                 goto out_err;
2304         err = perf_session__flush_thread_stacks(session);
2305 out_err:
2306         ui_progress__finish();
2307         if (!tool->no_warn)
2308                 perf_session__warn_about_errors(session);
2309         /*
2310          * We may be switching perf.data output, so make ordered_events
2311          * reusable.
2312          */
2313         ordered_events__reinit(&session->ordered_events);
2314         auxtrace__free_events(session);
2315         session->one_mmap = false;
2316         return err;
2317 }
2318
2319 int perf_session__process_events(struct perf_session *session)
2320 {
2321         if (perf_session__register_idle_thread(session) < 0)
2322                 return -ENOMEM;
2323
2324         if (perf_data__is_pipe(session->data))
2325                 return __perf_session__process_pipe_events(session);
2326
2327         return __perf_session__process_events(session);
2328 }
2329
2330 bool perf_session__has_traces(struct perf_session *session, const char *msg)
2331 {
2332         struct evsel *evsel;
2333
2334         evlist__for_each_entry(session->evlist, evsel) {
2335                 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
2336                         return true;
2337         }
2338
2339         pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2340         return false;
2341 }
2342
2343 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2344 {
2345         char *bracket;
2346         struct ref_reloc_sym *ref;
2347         struct kmap *kmap;
2348
2349         ref = zalloc(sizeof(struct ref_reloc_sym));
2350         if (ref == NULL)
2351                 return -ENOMEM;
2352
2353         ref->name = strdup(symbol_name);
2354         if (ref->name == NULL) {
2355                 free(ref);
2356                 return -ENOMEM;
2357         }
2358
2359         bracket = strchr(ref->name, ']');
2360         if (bracket)
2361                 *bracket = '\0';
2362
2363         ref->addr = addr;
2364
2365         kmap = map__kmap(map);
2366         if (kmap)
2367                 kmap->ref_reloc_sym = ref;
2368
2369         return 0;
2370 }
2371
2372 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2373 {
2374         return machines__fprintf_dsos(&session->machines, fp);
2375 }
2376
2377 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2378                                           bool (skip)(struct dso *dso, int parm), int parm)
2379 {
2380         return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2381 }
2382
2383 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2384 {
2385         size_t ret;
2386         const char *msg = "";
2387
2388         if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2389                 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2390
2391         ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2392
2393         ret += events_stats__fprintf(&session->evlist->stats, fp);
2394         return ret;
2395 }
2396
2397 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2398 {
2399         /*
2400          * FIXME: Here we have to actually print all the machines in this
2401          * session, not just the host...
2402          */
2403         return machine__fprintf(&session->machines.host, fp);
2404 }
2405
2406 struct evsel *perf_session__find_first_evtype(struct perf_session *session,
2407                                               unsigned int type)
2408 {
2409         struct evsel *pos;
2410
2411         evlist__for_each_entry(session->evlist, pos) {
2412                 if (pos->core.attr.type == type)
2413                         return pos;
2414         }
2415         return NULL;
2416 }
2417
2418 int perf_session__cpu_bitmap(struct perf_session *session,
2419                              const char *cpu_list, unsigned long *cpu_bitmap)
2420 {
2421         int i, err = -1;
2422         struct perf_cpu_map *map;
2423         int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);
2424
2425         for (i = 0; i < PERF_TYPE_MAX; ++i) {
2426                 struct evsel *evsel;
2427
2428                 evsel = perf_session__find_first_evtype(session, i);
2429                 if (!evsel)
2430                         continue;
2431
2432                 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
2433                         pr_err("File does not contain CPU events. "
2434                                "Remove -C option to proceed.\n");
2435                         return -1;
2436                 }
2437         }
2438
2439         map = perf_cpu_map__new(cpu_list);
2440         if (map == NULL) {
2441                 pr_err("Invalid cpu_list\n");
2442                 return -1;
2443         }
2444
2445         for (i = 0; i < map->nr; i++) {
2446                 int cpu = map->map[i];
2447
2448                 if (cpu >= nr_cpus) {
2449                         pr_err("Requested CPU %d too large. "
2450                                "Consider raising MAX_NR_CPUS\n", cpu);
2451                         goto out_delete_map;
2452                 }
2453
2454                 set_bit(cpu, cpu_bitmap);
2455         }
2456
2457         err = 0;
2458
2459 out_delete_map:
2460         perf_cpu_map__put(map);
2461         return err;
2462 }
2463
2464 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2465                                 bool full)
2466 {
2467         if (session == NULL || fp == NULL)
2468                 return;
2469
2470         fprintf(fp, "# ========\n");
2471         perf_header__fprintf_info(session, fp, full);
2472         fprintf(fp, "# ========\n#\n");
2473 }
2474
2475 int perf_event__process_id_index(struct perf_session *session,
2476                                  union perf_event *event)
2477 {
2478         struct evlist *evlist = session->evlist;
2479         struct perf_record_id_index *ie = &event->id_index;
2480         size_t i, nr, max_nr;
2481
2482         max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
2483                  sizeof(struct id_index_entry);
2484         nr = ie->nr;
2485         if (nr > max_nr)
2486                 return -EINVAL;
2487
2488         if (dump_trace)
2489                 fprintf(stdout, " nr: %zu\n", nr);
2490
2491         for (i = 0; i < nr; i++) {
2492                 struct id_index_entry *e = &ie->entries[i];
2493                 struct perf_sample_id *sid;
2494
2495                 if (dump_trace) {
2496                         fprintf(stdout, " ... id: %"PRI_lu64, e->id);
2497                         fprintf(stdout, "  idx: %"PRI_lu64, e->idx);
2498                         fprintf(stdout, "  cpu: %"PRI_ld64, e->cpu);
2499                         fprintf(stdout, "  tid: %"PRI_ld64"\n", e->tid);
2500                 }
2501
2502                 sid = perf_evlist__id2sid(evlist, e->id);
2503                 if (!sid)
2504                         return -ENOENT;
2505                 sid->idx = e->idx;
2506                 sid->cpu = e->cpu;
2507                 sid->tid = e->tid;
2508         }
2509         return 0;
2510 }