/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "session.h"
#include "machine.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "color.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"
#define MAX_TIMESTAMP (~0ULL)
struct intel_pt {
        struct auxtrace auxtrace;
        struct auxtrace_queues queues;
        struct auxtrace_heap heap;
        u32 auxtrace_type;
        struct perf_session *session;
        struct machine *machine;
        struct perf_evsel *switch_evsel;
        struct thread *unknown_thread;
        bool timeless_decoding;
        bool sampling_mode;
        bool snapshot_mode;
        bool per_cpu_mmaps;
        bool have_tsc;
        bool data_queued;
        bool est_tsc;
        bool sync_switch;
        bool mispred_all;
        int have_sched_switch;
        u32 pmu_type;
        u64 kernel_start;
        u64 switch_ip;
        u64 ptss_ip;
        struct perf_tsc_conversion tc;
        bool cap_user_time_zero;
        struct itrace_synth_opts synth_opts;
        bool sample_instructions;
        u64 instructions_sample_type;
        u64 instructions_id;
        bool sample_branches;
        u32 branches_filter;
        u64 branches_sample_type;
        u64 branches_id;
        bool sample_transactions;
        u64 transactions_sample_type;
        u64 transactions_id;
        bool sample_ptwrites;
        u64 ptwrites_sample_type;
        u64 ptwrites_id;
        bool sample_pwr_events;
        u64 pwr_events_sample_type;
        u64 mwait_id;
        u64 pwre_id;
        u64 exstop_id;
        u64 pwrx_id;
        u64 cbr_id;
        u64 tsc_bit;
        u64 mtc_bit;
        u64 mtc_freq_bits;
        u64 tsc_ctc_ratio_n;
        u64 tsc_ctc_ratio_d;
        u64 cyc_bit;
        u64 noretcomp_bit;
        bool synth_needs_swap;
        unsigned max_non_turbo_ratio;
        unsigned cbr2khz;
        unsigned long num_events;
        char *filter;
        struct addr_filters filts;
};
enum intel_pt_switch_state {
        INTEL_PT_SS_NOT_TRACING,
        INTEL_PT_SS_UNKNOWN,
        INTEL_PT_SS_TRACING,
        INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
        INTEL_PT_SS_EXPECTING_SWITCH_IP,
};
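
/*
 * Rough lifecycle of the per-queue switch state (editorial summary; see
 * intel_pt_sample() and intel_pt_sync_switch() below): a queue starts in
 * NOT_TRACING.  Decoding the kernel's switch IP while tracing moves it to
 * EXPECTING_SWITCH_EVENT until the sched_switch / context-switch event
 * confirms the new tid (back to TRACING); a switch event that arrives first
 * moves it to EXPECTING_SWITCH_IP until the switch IP is decoded.  A branch
 * with no target IP means tracing stopped (NOT_TRACING).
 */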
struct intel_pt_queue {
        struct intel_pt *pt;
        unsigned int queue_nr;
        struct auxtrace_buffer *buffer;
        struct intel_pt_decoder *decoder;
        const struct intel_pt_state *state;
        struct ip_callchain *chain;
        struct branch_stack *last_branch;
        struct branch_stack *last_branch_rb;
        size_t last_branch_pos;
        union perf_event *event_buf;
        bool on_heap;
        bool stop;
        bool step_through_buffers;
        bool use_buffer_pid_tid;
        bool sync_switch;
        bool have_sample;
        bool exclude_kernel;
        pid_t pid, tid;
        int cpu;
        int switch_state;
        pid_t next_tid;
        struct thread *thread;
        u64 time;
        u64 timestamp;
        u32 flags;
        u16 insn_len;
        u64 last_insn_cnt;
        char insn[INTEL_PT_INSN_BUF_SZ];
};
static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
                          unsigned char *buf, size_t len)
{
        struct intel_pt_pkt packet;
        size_t pos = 0;
        int ret, pkt_len, i;
        char desc[INTEL_PT_PKT_DESC_MAX];
        const char *color = PERF_COLOR_BLUE;

        color_fprintf(stdout, color,
                      ". ... Intel Processor Trace data: size %zu bytes\n",
                      len);

        while (len) {
                ret = intel_pt_get_packet(buf, len, &packet);
                if (ret > 0)
                        pkt_len = ret;
                else
                        pkt_len = 1;
                color_fprintf(stdout, color, "  %08x: ", pos);
                for (i = 0; i < pkt_len; i++)
                        color_fprintf(stdout, color, " %02x", buf[i]);
                color_fprintf(stdout, color, " ");
                if (ret > 0) {
                        ret = intel_pt_pkt_desc(&packet, desc,
                                                INTEL_PT_PKT_DESC_MAX);
                        if (ret > 0)
                                color_fprintf(stdout, color, " %s\n", desc);
                } else {
                        color_fprintf(stdout, color, " Bad packet!\n");
                }
                pos += pkt_len;
                buf += pkt_len;
                len -= pkt_len;
        }
}
static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
                                size_t len)
{
        printf(".\n");
        intel_pt_dump(pt, buf, len);
}
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
                                   struct auxtrace_buffer *b)
{
        bool consecutive = false;
        void *start;

        start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
                                      pt->have_tsc, &consecutive);
        if (!start)
                return -EINVAL;

        b->use_size = b->data + b->size - start;
        b->use_data = start;

        if (b->use_size && consecutive)
                b->consecutive = true;

        return 0;
}
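
/*
 * Worked example (illustrative numbers): in snapshot mode two successive
 * AUX snapshots can share the tail of the ring buffer.  If
 * intel_pt_find_overlap() reports that buffer b repeats buffer a's bytes
 * up to 'start', only [start, b->data + b->size) is new data: with
 * b->size = 1000 and start = b->data + 400, use_size becomes 600.
 */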
static void intel_pt_use_buffer_pid_tid(struct intel_pt_queue *ptq,
                                        struct auxtrace_queue *queue,
                                        struct auxtrace_buffer *buffer)
{
        if (queue->cpu == -1 && buffer->cpu != -1)
                ptq->cpu = buffer->cpu;

        ptq->pid = buffer->pid;
        ptq->tid = buffer->tid;

        intel_pt_log("queue %u cpu %d pid %d tid %d\n",
                     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);

        thread__zput(ptq->thread);

        if (ptq->tid != -1) {
                if (ptq->pid != -1)
                        ptq->thread = machine__findnew_thread(ptq->pt->machine,
                                                              ptq->pid,
                                                              ptq->tid);
                else
                        ptq->thread = machine__find_thread(ptq->pt->machine, -1,
                                                           ptq->tid);
        }
}
/* This function assumes data is processed sequentially only */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
        struct intel_pt_queue *ptq = data;
        struct auxtrace_buffer *buffer = ptq->buffer, *old_buffer = buffer;
        struct auxtrace_queue *queue;

        if (ptq->stop) {
                b->len = 0;
                return 0;
        }

        queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
next:
        buffer = auxtrace_buffer__next(queue, buffer);
        if (!buffer) {
                if (old_buffer)
                        auxtrace_buffer__drop_data(old_buffer);
                b->len = 0;
                return 0;
        }

        ptq->buffer = buffer;

        if (!buffer->data) {
                int fd = perf_data_file__fd(ptq->pt->session->file);

                buffer->data = auxtrace_buffer__get_data(buffer, fd);
                if (!buffer->data)
                        return -ENOMEM;
        }

        if (ptq->pt->snapshot_mode && !buffer->consecutive && old_buffer &&
            intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
                return -ENOMEM;

        if (buffer->use_data) {
                b->len = buffer->use_size;
                b->buf = buffer->use_data;
        } else {
                b->len = buffer->size;
                b->buf = buffer->data;
        }
        b->ref_timestamp = buffer->reference;

        /*
         * If in snapshot mode and the buffer has no usable data, get next
         * buffer and again check overlap against old_buffer.
         */
        if (ptq->pt->snapshot_mode && !b->len)
                goto next;

        if (old_buffer)
                auxtrace_buffer__drop_data(old_buffer);

        if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode &&
                                                      !buffer->consecutive)) {
                b->consecutive = false;
                b->trace_nr = buffer->buffer_nr + 1;
        } else {
                b->consecutive = true;
        }

        if (ptq->use_buffer_pid_tid && (ptq->pid != buffer->pid ||
                                        ptq->tid != buffer->tid))
                intel_pt_use_buffer_pid_tid(ptq, queue, buffer);

        if (ptq->step_through_buffers)
                ptq->stop = true;

        if (!b->len)
                return intel_pt_get_trace(b, data);

        return 0;
}
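
/*
 * Note on the stepping logic above (editorial): when step_through_buffers
 * is set (timeless sampling mode), 'stop' makes the decoder consume one
 * buffer at a time so pid/tid can be re-synchronized between buffers, and
 * the tail call retries when a buffer turns out to contribute no data.
 */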
struct intel_pt_cache_entry {
        struct auxtrace_cache_entry entry;
        u64 insn_cnt;
        u64 byte_cnt;
        enum intel_pt_insn_op op;
        enum intel_pt_insn_branch branch;
        int length;
        int32_t rel;
        char insn[INTEL_PT_INSN_BUF_SZ];
};
static int intel_pt_config_div(const char *var, const char *value, void *data)
{
        int *d = data;
        long val;

        if (!strcmp(var, "intel-pt.cache-divisor")) {
                val = strtol(value, NULL, 0);
                if (val > 0 && val <= INT_MAX)
                        *d = val;
        }

        return 0;
}

static int intel_pt_cache_divisor(void)
{
        static int d;

        if (d)
                return d;

        perf_config(intel_pt_config_div, &d);

        if (!d)
                d = 64;

        return d;
}
static unsigned int intel_pt_cache_size(struct dso *dso,
                                        struct machine *machine)
{
        off_t size;

        size = dso__data_size(dso, machine);
        size /= intel_pt_cache_divisor();
        if (size < 1000)
                return 10;
        if (size > (1 << 21))
                return 21;
        return 32 - __builtin_clz(size);
}
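
/*
 * Worked example (illustrative numbers): a 4 MiB DSO with the default
 * divisor of 64 gives size = 4194304 / 64 = 65536, and
 * 32 - __builtin_clz(65536) = 32 - 15 = 17, so the cache below is created
 * with 2^17 hash buckets.
 */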
static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
                                             struct machine *machine)
{
        struct auxtrace_cache *c;
        unsigned int bits;

        if (dso->auxtrace_cache)
                return dso->auxtrace_cache;

        bits = intel_pt_cache_size(dso, machine);

        /* Ignoring cache creation failure */
        c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

        dso->auxtrace_cache = c;

        return c;
}
static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
                              u64 offset, u64 insn_cnt, u64 byte_cnt,
                              struct intel_pt_insn *intel_pt_insn)
{
        struct auxtrace_cache *c = intel_pt_cache(dso, machine);
        struct intel_pt_cache_entry *e;
        int err;

        if (!c)
                return -ENOMEM;

        e = auxtrace_cache__alloc_entry(c);
        if (!e)
                return -ENOMEM;

        e->insn_cnt = insn_cnt;
        e->byte_cnt = byte_cnt;
        e->op = intel_pt_insn->op;
        e->branch = intel_pt_insn->branch;
        e->length = intel_pt_insn->length;
        e->rel = intel_pt_insn->rel;
        memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);

        err = auxtrace_cache__add(c, offset, &e->entry);
        if (err)
                auxtrace_cache__free_entry(c, e);

        return err;
}
static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
        struct auxtrace_cache *c = intel_pt_cache(dso, machine);

        if (!c)
                return NULL;

        return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
                                   uint64_t *insn_cnt_ptr, uint64_t *ip,
                                   uint64_t to_ip, uint64_t max_insn_cnt,
                                   void *data)
{
        struct intel_pt_queue *ptq = data;
        struct machine *machine = ptq->pt->machine;
        struct thread *thread;
        struct addr_location al;
        unsigned char buf[INTEL_PT_INSN_BUF_SZ];
        ssize_t len;
        int x86_64;
        u8 cpumode;
        u64 offset, start_offset, start_ip;
        u64 insn_cnt = 0;
        bool one_map = true;

        intel_pt_insn->length = 0;

        if (to_ip && *ip == to_ip)
                goto out_no_cache;

        if (*ip >= ptq->pt->kernel_start)
                cpumode = PERF_RECORD_MISC_KERNEL;
        else
                cpumode = PERF_RECORD_MISC_USER;

        thread = ptq->thread;
        if (!thread) {
                if (cpumode != PERF_RECORD_MISC_KERNEL)
                        return -EINVAL;
                thread = ptq->pt->unknown_thread;
        }

        while (1) {
                thread__find_addr_map(thread, cpumode, MAP__FUNCTION, *ip, &al);
                if (!al.map || !al.map->dso)
                        return -EINVAL;

                if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
                    dso__data_status_seen(al.map->dso,
                                          DSO_DATA_STATUS_SEEN_ITRACE))
                        return -ENOENT;

                offset = al.map->map_ip(al.map, *ip);

                if (!to_ip && one_map) {
                        struct intel_pt_cache_entry *e;

                        e = intel_pt_cache_lookup(al.map->dso, machine, offset);
                        if (e &&
                            (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
                                *insn_cnt_ptr = e->insn_cnt;
                                *ip += e->byte_cnt;
                                intel_pt_insn->op = e->op;
                                intel_pt_insn->branch = e->branch;
                                intel_pt_insn->length = e->length;
                                intel_pt_insn->rel = e->rel;
                                memcpy(intel_pt_insn->buf, e->insn,
                                       INTEL_PT_INSN_BUF_SZ);
                                intel_pt_log_insn_no_data(intel_pt_insn, *ip);
                                return 0;
                        }
                }

                start_offset = offset;
                start_ip = *ip;

                /* Load maps to ensure dso->is_64_bit has been updated */
                map__load(al.map);

                x86_64 = al.map->dso->is_64_bit;

                while (1) {
                        len = dso__data_read_offset(al.map->dso, machine,
                                                    offset, buf,
                                                    INTEL_PT_INSN_BUF_SZ);
                        if (len <= 0)
                                return -EINVAL;

                        if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
                                return -EINVAL;

                        intel_pt_log_insn(intel_pt_insn, *ip);

                        insn_cnt += 1;

                        if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
                                goto out;

                        if (max_insn_cnt && insn_cnt >= max_insn_cnt)
                                goto out_no_cache;

                        *ip += intel_pt_insn->length;

                        if (to_ip && *ip == to_ip) {
                                intel_pt_insn->length = 0;
                                goto out_no_cache;
                        }

                        if (*ip >= al.map->end)
                                break;

                        offset += intel_pt_insn->length;
                }
                one_map = false;
        }
out:
        *insn_cnt_ptr = insn_cnt;

        if (!one_map)
                goto out_no_cache;

        /*
         * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
         * entries.
         */
        if (to_ip) {
                struct intel_pt_cache_entry *e;

                e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
                if (e)
                        return 0;
        }

        /* Ignore cache errors */
        intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
                           *ip - start_ip, intel_pt_insn);

        return 0;

out_no_cache:
        *insn_cnt_ptr = insn_cnt;
        return 0;
}
static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
                                  uint64_t offset, const char *filename)
{
        struct addr_filter *filt;
        bool have_filter   = false;
        bool hit_tracestop = false;
        bool hit_filter    = false;

        list_for_each_entry(filt, &pt->filts.head, list) {
                if (filt->start)
                        have_filter = true;

                if ((filename && !filt->filename) ||
                    (!filename && filt->filename) ||
                    (filename && strcmp(filename, filt->filename)))
                        continue;

                if (!(offset >= filt->addr && offset < filt->addr + filt->size))
                        continue;

                intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
                             ip, offset, filename ? filename : "[kernel]",
                             filt->start ? "filter" : "stop",
                             filt->addr, filt->size);

                if (filt->start)
                        hit_filter = true;
                else
                        hit_tracestop = true;
        }

        if (!hit_tracestop && !hit_filter)
                intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
                             ip, offset, filename ? filename : "[kernel]");

        return hit_tracestop || (have_filter && !hit_filter);
}
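
/*
 * Return semantics (editorial note): true tells the decoder to treat this
 * TIP.PGD destination as outside the traced region.  That happens when a
 * trace-stop region was hit, or when "filter" regions exist and none
 * matched.  With no address filters configured at all, the result is
 * always false.
 */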
static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
        struct intel_pt_queue *ptq = data;
        struct thread *thread;
        struct addr_location al;
        u8 cpumode;
        u64 offset;

        if (ip >= ptq->pt->kernel_start)
                return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);

        cpumode = PERF_RECORD_MISC_USER;

        thread = ptq->thread;
        if (!thread)
                return -EINVAL;

        thread__find_addr_map(thread, cpumode, MAP__FUNCTION, ip, &al);
        if (!al.map || !al.map->dso)
                return -EINVAL;

        offset = al.map->map_ip(al.map, ip);

        return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
                                     al.map->dso->long_name);
}

static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
        return __intel_pt_pgd_ip(ip, data) > 0;
}
static bool intel_pt_get_config(struct intel_pt *pt,
                                struct perf_event_attr *attr, u64 *config)
{
        if (attr->type == pt->pmu_type) {
                if (config)
                        *config = attr->config;
                return true;
        }

        return false;
}
static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
                    !evsel->attr.exclude_kernel)
                        return false;
        }
        return true;
}
static bool intel_pt_return_compression(struct intel_pt *pt)
{
        struct perf_evsel *evsel;
        u64 config;

        if (!pt->noretcomp_bit)
                return true;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, &config) &&
                    (config & pt->noretcomp_bit))
                        return false;
        }
        return true;
}
static bool intel_pt_branch_enable(struct intel_pt *pt)
{
        struct perf_evsel *evsel;
        u64 config;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, &config) &&
                    (config & 1) && !(config & 0x2000))
                        return true;
        }
        return false;
}
static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
        struct perf_evsel *evsel;
        unsigned int shift;
        u64 config;

        if (!pt->mtc_freq_bits)
                return 0;

        for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
                config >>= 1;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, &config))
                        return (config & pt->mtc_freq_bits) >> shift;
        }
        return 0;
}
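
/*
 * Example (mask value is illustrative): the mtc_period field location comes
 * from the PMU format description, typically config bits 14-17, i.e.
 * mtc_freq_bits == 0x3c000.  The loop above then computes shift = 14, so
 * the period is extracted as (config & 0x3c000) >> 14.
 */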
static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
        struct perf_evsel *evsel;
        bool timeless_decoding = true;
        u64 config;

        if (!pt->tsc_bit || !pt->cap_user_time_zero)
                return true;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (!(evsel->attr.sample_type & PERF_SAMPLE_TIME))
                        return true;
                if (intel_pt_get_config(pt, &evsel->attr, &config)) {
                        if (config & pt->tsc_bit)
                                timeless_decoding = false;
                        else
                                return true;
                }
        }
        return timeless_decoding;
}
static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
                    !evsel->attr.exclude_kernel)
                        return true;
        }
        return false;
}
static bool intel_pt_have_tsc(struct intel_pt *pt)
{
        struct perf_evsel *evsel;
        bool have_tsc = false;
        u64 config;

        if (!pt->tsc_bit)
                return false;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, &config)) {
                        if (config & pt->tsc_bit)
                                have_tsc = true;
                        else
                                return false;
                }
        }
        return have_tsc;
}
static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
        u64 quot, rem;

        quot = ns / pt->tc.time_mult;
        rem  = ns % pt->tc.time_mult;
        return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
                pt->tc.time_mult;
}
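
/*
 * The conversion is ticks = ns * 2^time_shift / time_mult, split into
 * quotient and remainder so that ns << time_shift cannot overflow 64 bits.
 * Worked example (illustrative values): time_mult = 1000, time_shift = 10,
 * ns = 123456 -> (123 << 10) + (456 << 10) / 1000 = 125952 + 466 = 126418.
 */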
static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
                                                   unsigned int queue_nr)
{
        struct intel_pt_params params = { .get_trace = 0, };
        struct perf_env *env = pt->machine->env;
        struct intel_pt_queue *ptq;

        ptq = zalloc(sizeof(struct intel_pt_queue));
        if (!ptq)
                return NULL;

        if (pt->synth_opts.callchain) {
                size_t sz = sizeof(struct ip_callchain);

                sz += pt->synth_opts.callchain_sz * sizeof(u64);
                ptq->chain = zalloc(sz);
                if (!ptq->chain)
                        goto out_free;
        }

        if (pt->synth_opts.last_branch) {
                size_t sz = sizeof(struct branch_stack);

                sz += pt->synth_opts.last_branch_sz *
                      sizeof(struct branch_entry);
                ptq->last_branch = zalloc(sz);
                if (!ptq->last_branch)
                        goto out_free;
                ptq->last_branch_rb = zalloc(sz);
                if (!ptq->last_branch_rb)
                        goto out_free;
        }

        ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
        if (!ptq->event_buf)
                goto out_free;

        ptq->pt = pt;
        ptq->queue_nr = queue_nr;
        ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
        ptq->pid = -1;
        ptq->tid = -1;
        ptq->cpu = -1;
        ptq->next_tid = -1;

        params.get_trace = intel_pt_get_trace;
        params.walk_insn = intel_pt_walk_next_insn;
        params.data = ptq;
        params.return_compression = intel_pt_return_compression(pt);
        params.branch_enable = intel_pt_branch_enable(pt);
        params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
        params.mtc_period = intel_pt_mtc_period(pt);
        params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
        params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;

        if (pt->filts.cnt > 0)
                params.pgd_ip = intel_pt_pgd_ip;

        if (pt->synth_opts.instructions) {
                if (pt->synth_opts.period) {
                        switch (pt->synth_opts.period_type) {
                        case PERF_ITRACE_PERIOD_INSTRUCTIONS:
                                params.period_type =
                                                INTEL_PT_PERIOD_INSTRUCTIONS;
                                params.period = pt->synth_opts.period;
                                break;
                        case PERF_ITRACE_PERIOD_TICKS:
                                params.period_type = INTEL_PT_PERIOD_TICKS;
                                params.period = pt->synth_opts.period;
                                break;
                        case PERF_ITRACE_PERIOD_NANOSECS:
                                params.period_type = INTEL_PT_PERIOD_TICKS;
                                params.period = intel_pt_ns_to_ticks(pt,
                                                        pt->synth_opts.period);
                                break;
                        default:
                                break;
                        }
                }

                if (!params.period) {
                        params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
                        params.period = 1;
                }
        }

        if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
                params.flags |= INTEL_PT_FUP_WITH_NLIP;

        ptq->decoder = intel_pt_decoder_new(&params);
        if (!ptq->decoder)
                goto out_free;

        return ptq;

out_free:
        zfree(&ptq->event_buf);
        zfree(&ptq->last_branch);
        zfree(&ptq->last_branch_rb);
        zfree(&ptq->chain);
        free(ptq);
        return NULL;
}
static void intel_pt_free_queue(void *priv)
{
        struct intel_pt_queue *ptq = priv;

        if (!ptq)
                return;
        thread__zput(ptq->thread);
        intel_pt_decoder_free(ptq->decoder);
        zfree(&ptq->event_buf);
        zfree(&ptq->last_branch);
        zfree(&ptq->last_branch_rb);
        zfree(&ptq->chain);
        free(ptq);
}
static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
                                     struct auxtrace_queue *queue)
{
        struct intel_pt_queue *ptq = queue->priv;

        if (queue->tid == -1 || pt->have_sched_switch) {
                ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
                thread__zput(ptq->thread);
        }

        if (!ptq->thread && ptq->tid != -1)
                ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

        if (ptq->thread) {
                ptq->pid = ptq->thread->pid_;
                if (queue->cpu == -1)
                        ptq->cpu = ptq->thread->cpu;
        }
}
static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
        if (ptq->state->flags & INTEL_PT_ABORT_TX) {
                ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
        } else if (ptq->state->flags & INTEL_PT_ASYNC) {
                if (ptq->state->to_ip)
                        ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
                                     PERF_IP_FLAG_ASYNC |
                                     PERF_IP_FLAG_INTERRUPT;
                else
                        ptq->flags = PERF_IP_FLAG_BRANCH |
                                     PERF_IP_FLAG_TRACE_END;
                ptq->insn_len = 0;
        } else {
                if (ptq->state->from_ip)
                        ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
                else
                        ptq->flags = PERF_IP_FLAG_BRANCH |
                                     PERF_IP_FLAG_TRACE_BEGIN;
                if (ptq->state->flags & INTEL_PT_IN_TX)
                        ptq->flags |= PERF_IP_FLAG_IN_TX;
                ptq->insn_len = ptq->state->insn_len;
                memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
        }
}
static int intel_pt_setup_queue(struct intel_pt *pt,
                                struct auxtrace_queue *queue,
                                unsigned int queue_nr)
{
        struct intel_pt_queue *ptq = queue->priv;

        if (list_empty(&queue->head))
                return 0;

        if (!ptq) {
                ptq = intel_pt_alloc_queue(pt, queue_nr);
                if (!ptq)
                        return -ENOMEM;
                queue->priv = ptq;

                if (queue->cpu != -1)
                        ptq->cpu = queue->cpu;
                ptq->tid = queue->tid;

                if (pt->sampling_mode) {
                        if (pt->timeless_decoding)
                                ptq->step_through_buffers = true;
                        if (pt->timeless_decoding || !pt->have_sched_switch)
                                ptq->use_buffer_pid_tid = true;
                }

                ptq->sync_switch = pt->sync_switch;
        }

        if (!ptq->on_heap &&
            (!ptq->sync_switch ||
             ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
                const struct intel_pt_state *state;
                int ret;

                if (pt->timeless_decoding)
                        return 0;

                intel_pt_log("queue %u getting timestamp\n", queue_nr);
                intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
                             queue_nr, ptq->cpu, ptq->pid, ptq->tid);
                while (1) {
                        state = intel_pt_decode(ptq->decoder);
                        if (state->err) {
                                if (state->err == INTEL_PT_ERR_NODATA) {
                                        intel_pt_log("queue %u has no timestamp\n",
                                                     queue_nr);
                                        return 0;
                                }
                                continue;
                        }
                        if (state->timestamp)
                                break;
                }

                ptq->timestamp = state->timestamp;
                intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
                             queue_nr, ptq->timestamp);
                ptq->state = state;
                ptq->have_sample = true;
                intel_pt_sample_flags(ptq);
                ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
                if (ret)
                        return ret;
                ptq->on_heap = true;
        }

        return 0;
}
static int intel_pt_setup_queues(struct intel_pt *pt)
{
        unsigned int i;
        int ret;

        for (i = 0; i < pt->queues.nr_queues; i++) {
                ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
                if (ret)
                        return ret;
        }
        return 0;
}
static inline void intel_pt_copy_last_branch_rb(struct intel_pt_queue *ptq)
{
        struct branch_stack *bs_src = ptq->last_branch_rb;
        struct branch_stack *bs_dst = ptq->last_branch;
        size_t nr = 0;

        bs_dst->nr = bs_src->nr;

        if (!bs_src->nr)
                return;

        nr = ptq->pt->synth_opts.last_branch_sz - ptq->last_branch_pos;
        memcpy(&bs_dst->entries[0],
               &bs_src->entries[ptq->last_branch_pos],
               sizeof(struct branch_entry) * nr);

        if (bs_src->nr >= ptq->pt->synth_opts.last_branch_sz) {
                memcpy(&bs_dst->entries[nr],
                       &bs_src->entries[0],
                       sizeof(struct branch_entry) * ptq->last_branch_pos);
        }
}
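
/*
 * Worked example (illustrative numbers): with last_branch_sz = 4 and
 * last_branch_pos = 1 on a full ring buffer, the newest entry is at
 * index 1, so the first memcpy takes entries [1..3] (nr = 3) and the
 * second copies entry [0] behind them, leaving bs_dst ordered newest
 * to oldest.
 */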
static inline void intel_pt_reset_last_branch_rb(struct intel_pt_queue *ptq)
{
        ptq->last_branch_pos = 0;
        ptq->last_branch_rb->nr = 0;
}
static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
{
        const struct intel_pt_state *state = ptq->state;
        struct branch_stack *bs = ptq->last_branch_rb;
        struct branch_entry *be;

        if (!ptq->last_branch_pos)
                ptq->last_branch_pos = ptq->pt->synth_opts.last_branch_sz;

        ptq->last_branch_pos -= 1;

        be = &bs->entries[ptq->last_branch_pos];
        be->from = state->from_ip;
        be->to = state->to_ip;
        be->flags.abort = !!(state->flags & INTEL_PT_ABORT_TX);
        be->flags.in_tx = !!(state->flags & INTEL_PT_IN_TX);
        /* No support for mispredict */
        be->flags.mispred = ptq->pt->mispred_all;

        if (bs->nr < ptq->pt->synth_opts.last_branch_sz)
                bs->nr += 1;
}
static inline bool intel_pt_skip_event(struct intel_pt *pt)
{
        return pt->synth_opts.initial_skip &&
               pt->num_events++ < pt->synth_opts.initial_skip;
}
static void intel_pt_prep_b_sample(struct intel_pt *pt,
                                   struct intel_pt_queue *ptq,
                                   union perf_event *event,
                                   struct perf_sample *sample)
{
        event->sample.header.type = PERF_RECORD_SAMPLE;
        event->sample.header.misc = PERF_RECORD_MISC_USER;
        event->sample.header.size = sizeof(struct perf_event_header);

        if (!pt->timeless_decoding)
                sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

        sample->cpumode = PERF_RECORD_MISC_USER;
        sample->ip = ptq->state->from_ip;
        sample->pid = ptq->pid;
        sample->tid = ptq->tid;
        sample->addr = ptq->state->to_ip;
        sample->period = 1;
        sample->cpu = ptq->cpu;
        sample->flags = ptq->flags;
        sample->insn_len = ptq->insn_len;
        memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
}
static int intel_pt_inject_event(union perf_event *event,
                                 struct perf_sample *sample, u64 type,
                                 bool swapped)
{
        event->header.size = perf_event__sample_event_size(sample, type, 0);
        return perf_event__synthesize_sample(event, type, 0, sample, swapped);
}
static inline int intel_pt_opt_inject(struct intel_pt *pt,
                                      union perf_event *event,
                                      struct perf_sample *sample, u64 type)
{
        if (!pt->synth_opts.inject)
                return 0;

        return intel_pt_inject_event(event, sample, type, pt->synth_needs_swap);
}
static int intel_pt_deliver_synth_b_event(struct intel_pt *pt,
                                          union perf_event *event,
                                          struct perf_sample *sample, u64 type)
{
        int ret;

        ret = intel_pt_opt_inject(pt, event, sample, type);
        if (ret)
                return ret;

        ret = perf_session__deliver_synth_event(pt->session, event, sample);
        if (ret)
                pr_err("Intel PT: failed to deliver event, error %d\n", ret);

        return ret;
}
static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };
        struct dummy_branch_stack {
                u64 nr;
                struct branch_entry entries;
        } dummy_bs;

        if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
                return 0;

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_b_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->branches_id;
        sample.stream_id = ptq->pt->branches_id;

        /*
         * perf report cannot handle events without a branch stack when using
         * SORT_MODE__BRANCH so make a dummy one.
         */
        if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
                dummy_bs = (struct dummy_branch_stack){
                        .nr = 1,
                        .entries = {
                                .from = sample.ip,
                                .to = sample.addr,
                        },
                };
                sample.branch_stack = (struct branch_stack *)&dummy_bs;
        }

        return intel_pt_deliver_synth_b_event(pt, event, &sample,
                                              pt->branches_sample_type);
}
static void intel_pt_prep_sample(struct intel_pt *pt,
                                 struct intel_pt_queue *ptq,
                                 union perf_event *event,
                                 struct perf_sample *sample)
{
        intel_pt_prep_b_sample(pt, ptq, event, sample);

        if (pt->synth_opts.callchain) {
                thread_stack__sample(ptq->thread, ptq->chain,
                                     pt->synth_opts.callchain_sz, sample->ip);
                sample->callchain = ptq->chain;
        }

        if (pt->synth_opts.last_branch) {
                intel_pt_copy_last_branch_rb(ptq);
                sample->branch_stack = ptq->last_branch;
        }
}
static inline int intel_pt_deliver_synth_event(struct intel_pt *pt,
                                               struct intel_pt_queue *ptq,
                                               union perf_event *event,
                                               struct perf_sample *sample,
                                               u64 type)
{
        int ret;

        ret = intel_pt_deliver_synth_b_event(pt, event, sample, type);

        if (pt->synth_opts.last_branch)
                intel_pt_reset_last_branch_rb(ptq);

        return ret;
}
static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->instructions_id;
        sample.stream_id = ptq->pt->instructions_id;
        sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;

        ptq->last_insn_cnt = ptq->state->tot_insn_cnt;

        return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
                                            pt->instructions_sample_type);
}
static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->transactions_id;
        sample.stream_id = ptq->pt->transactions_id;

        return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
                                            pt->transactions_sample_type);
}
static void intel_pt_prep_p_sample(struct intel_pt *pt,
                                   struct intel_pt_queue *ptq,
                                   union perf_event *event,
                                   struct perf_sample *sample)
{
        intel_pt_prep_sample(pt, ptq, event, sample);

        /*
         * Zero IP is used to mean "trace start" but that is not the case for
         * power or PTWRITE events with no IP, so clear the flags.
         */
        if (!sample->ip)
                sample->flags = 0;
}
static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };
        struct perf_synth_intel_ptwrite raw;

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_p_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->ptwrites_id;
        sample.stream_id = ptq->pt->ptwrites_id;

        raw.flags = 0;
        raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
        raw.payload = cpu_to_le64(ptq->state->ptw_payload);

        sample.raw_size = perf_synth__raw_size(raw);
        sample.raw_data = perf_synth__raw_data(&raw);

        return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
                                            pt->ptwrites_sample_type);
}
static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };
        struct perf_synth_intel_cbr raw;
        u32 flags;

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_p_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->cbr_id;
        sample.stream_id = ptq->pt->cbr_id;

        flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
        raw.flags = cpu_to_le32(flags);
        raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
        raw.reserved3 = 0;

        sample.raw_size = perf_synth__raw_size(raw);
        sample.raw_data = perf_synth__raw_data(&raw);

        return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
                                            pt->pwr_events_sample_type);
}
static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };
        struct perf_synth_intel_mwait raw;

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_p_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->mwait_id;
        sample.stream_id = ptq->pt->mwait_id;

        raw.reserved = 0;
        raw.payload = cpu_to_le64(ptq->state->mwait_payload);

        sample.raw_size = perf_synth__raw_size(raw);
        sample.raw_data = perf_synth__raw_data(&raw);

        return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
                                            pt->pwr_events_sample_type);
}
static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };
        struct perf_synth_intel_pwre raw;

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_p_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->pwre_id;
        sample.stream_id = ptq->pt->pwre_id;

        raw.reserved = 0;
        raw.payload = cpu_to_le64(ptq->state->pwre_payload);

        sample.raw_size = perf_synth__raw_size(raw);
        sample.raw_data = perf_synth__raw_data(&raw);

        return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
                                            pt->pwr_events_sample_type);
}
static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };
        struct perf_synth_intel_exstop raw;

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_p_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->exstop_id;
        sample.stream_id = ptq->pt->exstop_id;

        raw.flags = 0;
        raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);

        sample.raw_size = perf_synth__raw_size(raw);
        sample.raw_data = perf_synth__raw_data(&raw);

        return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
                                            pt->pwr_events_sample_type);
}
static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };
        struct perf_synth_intel_pwrx raw;

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_p_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->pwrx_id;
        sample.stream_id = ptq->pt->pwrx_id;

        raw.reserved = 0;
        raw.payload = cpu_to_le64(ptq->state->pwrx_payload);

        sample.raw_size = perf_synth__raw_size(raw);
        sample.raw_data = perf_synth__raw_data(&raw);

        return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
                                            pt->pwr_events_sample_type);
}
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
                                pid_t pid, pid_t tid, u64 ip)
{
        union perf_event event;
        char msg[MAX_AUXTRACE_ERROR_MSG];
        int err;

        intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);

        auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
                             code, cpu, pid, tid, ip, msg);

        err = perf_session__deliver_synth_event(pt->session, &event, NULL);
        if (err)
                pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
                       err);

        return err;
}
static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
        struct auxtrace_queue *queue;
        pid_t tid = ptq->next_tid;
        int err;

        if (tid == -1)
                return 0;

        intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);

        err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);

        queue = &pt->queues.queue_array[ptq->queue_nr];
        intel_pt_set_pid_tid_cpu(pt, queue);

        ptq->next_tid = -1;

        return err;
}
static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
{
        struct intel_pt *pt = ptq->pt;

        return ip == pt->switch_ip &&
               (ptq->flags & PERF_IP_FLAG_BRANCH) &&
               !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
                               PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}
#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
                          INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT | \
                          INTEL_PT_CBR_CHG)
static int intel_pt_sample(struct intel_pt_queue *ptq)
{
        const struct intel_pt_state *state = ptq->state;
        struct intel_pt *pt = ptq->pt;
        int err;

        if (!ptq->have_sample)
                return 0;

        ptq->have_sample = false;

        if (pt->sample_pwr_events && (state->type & INTEL_PT_PWR_EVT)) {
                if (state->type & INTEL_PT_CBR_CHG) {
                        err = intel_pt_synth_cbr_sample(ptq);
                        if (err)
                                return err;
                }
                if (state->type & INTEL_PT_MWAIT_OP) {
                        err = intel_pt_synth_mwait_sample(ptq);
                        if (err)
                                return err;
                }
                if (state->type & INTEL_PT_PWR_ENTRY) {
                        err = intel_pt_synth_pwre_sample(ptq);
                        if (err)
                                return err;
                }
                if (state->type & INTEL_PT_EX_STOP) {
                        err = intel_pt_synth_exstop_sample(ptq);
                        if (err)
                                return err;
                }
                if (state->type & INTEL_PT_PWR_EXIT) {
                        err = intel_pt_synth_pwrx_sample(ptq);
                        if (err)
                                return err;
                }
        }

        if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
                err = intel_pt_synth_instruction_sample(ptq);
                if (err)
                        return err;
        }

        if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
                err = intel_pt_synth_transaction_sample(ptq);
                if (err)
                        return err;
        }

        if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
                err = intel_pt_synth_ptwrite_sample(ptq);
                if (err)
                        return err;
        }

        if (!(state->type & INTEL_PT_BRANCH))
                return 0;

        if (pt->synth_opts.callchain || pt->synth_opts.thread_stack)
                thread_stack__event(ptq->thread, ptq->flags, state->from_ip,
                                    state->to_ip, ptq->insn_len,
                                    state->trace_nr);
        else
                thread_stack__set_trace_nr(ptq->thread, state->trace_nr);

        if (pt->sample_branches) {
                err = intel_pt_synth_branch_sample(ptq);
                if (err)
                        return err;
        }

        if (pt->synth_opts.last_branch)
                intel_pt_update_last_branch_rb(ptq);

        if (!ptq->sync_switch)
                return 0;

        if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
                switch (ptq->switch_state) {
                case INTEL_PT_SS_NOT_TRACING:
                case INTEL_PT_SS_UNKNOWN:
                case INTEL_PT_SS_EXPECTING_SWITCH_IP:
                        err = intel_pt_next_tid(pt, ptq);
                        if (err)
                                return err;
                        ptq->switch_state = INTEL_PT_SS_TRACING;
                        break;
                default:
                        ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
                        return 1;
                }
        } else if (!state->to_ip) {
                ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
        } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
                ptq->switch_state = INTEL_PT_SS_UNKNOWN;
        } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
                   state->to_ip == pt->ptss_ip &&
                   (ptq->flags & PERF_IP_FLAG_CALL)) {
                ptq->switch_state = INTEL_PT_SS_TRACING;
        }

        return 0;
}
static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
{
        struct machine *machine = pt->machine;
        struct map *map;
        struct symbol *sym, *start;
        u64 ip, switch_ip = 0;
        const char *ptss;

        if (ptss_ip)
                *ptss_ip = 0;

        map = machine__kernel_map(machine);
        if (!map)
                return 0;

        if (map__load(map))
                return 0;

        start = dso__first_symbol(map->dso, MAP__FUNCTION);

        for (sym = start; sym; sym = dso__next_symbol(sym)) {
                if (sym->binding == STB_GLOBAL &&
                    !strcmp(sym->name, "__switch_to")) {
                        ip = map->unmap_ip(map, sym->start);
                        if (ip >= map->start && ip < map->end) {
                                switch_ip = ip;
                                break;
                        }
                }
        }

        if (!switch_ip || !ptss_ip)
                return 0;

        if (pt->have_sched_switch == 1)
                ptss = "perf_trace_sched_switch";
        else
                ptss = "__perf_event_task_sched_out";

        for (sym = start; sym; sym = dso__next_symbol(sym)) {
                if (!strcmp(sym->name, ptss)) {
                        ip = map->unmap_ip(map, sym->start);
                        if (ip >= map->start && ip < map->end) {
                                *ptss_ip = ip;
                                break;
                        }
                }
        }

        return switch_ip;
}
static void intel_pt_enable_sync_switch(struct intel_pt *pt)
{
        unsigned int i;

        pt->sync_switch = true;

        for (i = 0; i < pt->queues.nr_queues; i++) {
                struct auxtrace_queue *queue = &pt->queues.queue_array[i];
                struct intel_pt_queue *ptq = queue->priv;

                if (ptq)
                        ptq->sync_switch = true;
        }
}
static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
        const struct intel_pt_state *state = ptq->state;
        struct intel_pt *pt = ptq->pt;
        int err;

        if (!pt->kernel_start) {
                pt->kernel_start = machine__kernel_start(pt->machine);
                if (pt->per_cpu_mmaps &&
                    (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
                    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
                    !pt->sampling_mode) {
                        pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
                        if (pt->switch_ip) {
                                intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
                                             pt->switch_ip, pt->ptss_ip);
                                intel_pt_enable_sync_switch(pt);
                        }
                }
        }

        intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
                     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
        while (1) {
                err = intel_pt_sample(ptq);
                if (err)
                        return err;

                state = intel_pt_decode(ptq->decoder);
                if (state->err) {
                        if (state->err == INTEL_PT_ERR_NODATA)
                                return 1;
                        if (ptq->sync_switch &&
                            state->from_ip >= pt->kernel_start) {
                                ptq->sync_switch = false;
                                intel_pt_next_tid(pt, ptq);
                        }
                        if (pt->synth_opts.errors) {
                                err = intel_pt_synth_error(pt, state->err,
                                                           ptq->cpu, ptq->pid,
                                                           ptq->tid,
                                                           state->from_ip);
                                if (err)
                                        return err;
                        }
                        continue;
                }

                ptq->state = state;
                ptq->have_sample = true;
                intel_pt_sample_flags(ptq);

                /* Use estimated TSC upon return to user space */
                if (pt->est_tsc &&
                    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
                    state->to_ip && state->to_ip < pt->kernel_start) {
                        intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
                                     state->timestamp, state->est_timestamp);
                        ptq->timestamp = state->est_timestamp;
                /* Use estimated TSC in unknown switch state */
                } else if (ptq->sync_switch &&
                           ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
                           intel_pt_is_switch_ip(ptq, state->to_ip) &&
                           ptq->next_tid == -1) {
                        intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
                                     state->timestamp, state->est_timestamp);
                        ptq->timestamp = state->est_timestamp;
                } else if (state->timestamp > ptq->timestamp) {
                        ptq->timestamp = state->timestamp;
                }

                if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
                        *timestamp = ptq->timestamp;
                        return 0;
                }
        }
        return 0;
}
static inline int intel_pt_update_queues(struct intel_pt *pt)
{
        if (pt->queues.new_data) {
                pt->queues.new_data = false;
                return intel_pt_setup_queues(pt);
        }

        return 0;
}
static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
{
        unsigned int queue_nr;
        u64 ts;
        int ret;

        while (1) {
                struct auxtrace_queue *queue;
                struct intel_pt_queue *ptq;

                if (!pt->heap.heap_cnt)
                        return 0;

                if (pt->heap.heap_array[0].ordinal >= timestamp)
                        return 0;

                queue_nr = pt->heap.heap_array[0].queue_nr;
                queue = &pt->queues.queue_array[queue_nr];
                ptq = queue->priv;

                intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
                             queue_nr, pt->heap.heap_array[0].ordinal,
                             timestamp);

                auxtrace_heap__pop(&pt->heap);

                if (pt->heap.heap_cnt) {
                        ts = pt->heap.heap_array[0].ordinal + 1;
                        if (ts > timestamp)
                                ts = timestamp;
                } else {
                        ts = timestamp;
                }

                intel_pt_set_pid_tid_cpu(pt, queue);

                ret = intel_pt_run_decoder(ptq, &ts);

                if (ret < 0) {
                        auxtrace_heap__add(&pt->heap, queue_nr, ts);
                        return ret;
                }

                if (!ret) {
                        ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
                        if (ret < 0)
                                return ret;
                } else {
                        ptq->on_heap = false;
                }
        }

        return 0;
}
static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
                                            u64 time_)
{
        struct auxtrace_queues *queues = &pt->queues;
        unsigned int i;
        u64 ts = 0;

        for (i = 0; i < queues->nr_queues; i++) {
                struct auxtrace_queue *queue = &pt->queues.queue_array[i];
                struct intel_pt_queue *ptq = queue->priv;

                if (ptq && (tid == -1 || ptq->tid == tid)) {
                        ptq->time = time_;
                        intel_pt_set_pid_tid_cpu(pt, queue);
                        intel_pt_run_decoder(ptq, &ts);
                }
        }
        return 0;
}
static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
        return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
                                    sample->pid, sample->tid, 0);
}
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
{
        unsigned i, j;

        if (cpu < 0 || !pt->queues.nr_queues)
                return NULL;

        if ((unsigned)cpu >= pt->queues.nr_queues)
                i = pt->queues.nr_queues - 1;
        else
                i = cpu;

        if (pt->queues.queue_array[i].cpu == cpu)
                return pt->queues.queue_array[i].priv;

        for (j = 0; i > 0; j++) {
                if (pt->queues.queue_array[--i].cpu == cpu)
                        return pt->queues.queue_array[i].priv;
        }

        for (; j < pt->queues.nr_queues; j++) {
                if (pt->queues.queue_array[j].cpu == cpu)
                        return pt->queues.queue_array[j].priv;
        }

        return NULL;
}
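
/*
 * Example (editorial note): with 8 queues and cpu == 5, index 5 is probed
 * first (queues are normally laid out one per cpu), then indices 4..0
 * walking down, then the remaining indices walking up, so the common case
 * is O(1) with a full scan as the fallback.
 */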
static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
                                u64 timestamp)
{
        struct intel_pt_queue *ptq;
        int err;

        if (!pt->sync_switch)
                return 1;

        ptq = intel_pt_cpu_to_ptq(pt, cpu);
        if (!ptq || !ptq->sync_switch)
                return 1;

        switch (ptq->switch_state) {
        case INTEL_PT_SS_NOT_TRACING:
                ptq->next_tid = -1;
                break;
        case INTEL_PT_SS_UNKNOWN:
        case INTEL_PT_SS_TRACING:
                ptq->next_tid = tid;
                ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
                return 0;
        case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
                if (!ptq->on_heap) {
                        ptq->timestamp = perf_time_to_tsc(timestamp,
                                                          &pt->tc);
                        err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
                                                 ptq->timestamp);
                        if (err)
                                return err;
                        ptq->on_heap = true;
                }
                ptq->switch_state = INTEL_PT_SS_TRACING;
                break;
        case INTEL_PT_SS_EXPECTING_SWITCH_IP:
                ptq->next_tid = tid;
                intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
                break;
        default:
                break;
        }

        return 1;
}
static int intel_pt_process_switch(struct intel_pt *pt,
                                   struct perf_sample *sample)
{
        struct perf_evsel *evsel;
        pid_t tid;
        int cpu, ret;

        evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
        if (evsel != pt->switch_evsel)
                return 0;

        tid = perf_evsel__intval(evsel, sample, "next_pid");
        cpu = sample->cpu;

        intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
                     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
                                                              &pt->tc));

        ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
        if (ret <= 0)
                return ret;

        return machine__set_current_tid(pt->machine, cpu, -1, tid);
}
static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
                                   struct perf_sample *sample)
{
        bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
        pid_t pid, tid;
        int cpu, ret;

        cpu = sample->cpu;

        if (pt->have_sched_switch == 3) {
                if (!out)
                        return 0;
                if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
                        pr_err("Expecting CPU-wide context switch event\n");
                        return -EINVAL;
                }
                pid = event->context_switch.next_prev_pid;
                tid = event->context_switch.next_prev_tid;
        } else {
                if (out)
                        return 0;
                pid = sample->pid;
                tid = sample->tid;
        }

        if (tid == -1)
                intel_pt_log("context_switch event has no tid\n");

        intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
                     cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
                                                                   &pt->tc));

        ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
        if (ret <= 0)
                return ret;

        return machine__set_current_tid(pt->machine, cpu, pid, tid);
}
static int intel_pt_process_itrace_start(struct intel_pt *pt,
                                         union perf_event *event,
                                         struct perf_sample *sample)
{
        if (!pt->per_cpu_mmaps)
                return 0;

        intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
                     sample->cpu, event->itrace_start.pid,
                     event->itrace_start.tid, sample->time,
                     perf_time_to_tsc(sample->time, &pt->tc));

        return machine__set_current_tid(pt->machine, sample->cpu,
                                        event->itrace_start.pid,
                                        event->itrace_start.tid);
}
static int intel_pt_process_event(struct perf_session *session,
                                  union perf_event *event,
                                  struct perf_sample *sample,
                                  struct perf_tool *tool)
{
        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
                                           auxtrace);
        u64 timestamp;
        int err = 0;

        if (dump_trace)
                return 0;

        if (!tool->ordered_events) {
                pr_err("Intel Processor Trace requires ordered events\n");
                return -EINVAL;
        }

        if (sample->time && sample->time != (u64)-1)
                timestamp = perf_time_to_tsc(sample->time, &pt->tc);
        else
                timestamp = 0;

        if (timestamp || pt->timeless_decoding) {
                err = intel_pt_update_queues(pt);
                if (err)
                        return err;
        }

        if (pt->timeless_decoding) {
                if (event->header.type == PERF_RECORD_EXIT) {
                        err = intel_pt_process_timeless_queues(pt,
                                                               event->fork.tid,
                                                               sample->time);
                }
        } else if (timestamp) {
                err = intel_pt_process_queues(pt, timestamp);
        }
        if (err)
                return err;

        if (event->header.type == PERF_RECORD_AUX &&
            (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
            pt->synth_opts.errors) {
                err = intel_pt_lost(pt, sample);
                if (err)
                        return err;
        }

        if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
                err = intel_pt_process_switch(pt, sample);
        else if (event->header.type == PERF_RECORD_ITRACE_START)
                err = intel_pt_process_itrace_start(pt, event, sample);
        else if (event->header.type == PERF_RECORD_SWITCH ||
                 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
                err = intel_pt_context_switch(pt, event, sample);

        intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
                     perf_event__name(event->header.type), event->header.type,
                     sample->cpu, sample->time, timestamp);

        return err;
}
static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
{
        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
                                           auxtrace);
        int ret;

        if (dump_trace)
                return 0;

        if (!tool->ordered_events)
                return -EINVAL;

        ret = intel_pt_update_queues(pt);
        if (ret < 0)
                return ret;

        if (pt->timeless_decoding)
                return intel_pt_process_timeless_queues(pt, -1,
                                                        MAX_TIMESTAMP - 1);

        return intel_pt_process_queues(pt, MAX_TIMESTAMP);
}
static void intel_pt_free_events(struct perf_session *session)
{
        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
                                           auxtrace);
        struct auxtrace_queues *queues = &pt->queues;
        unsigned int i;

        for (i = 0; i < queues->nr_queues; i++) {
                intel_pt_free_queue(queues->queue_array[i].priv);
                queues->queue_array[i].priv = NULL;
        }
        intel_pt_log_disable();
        auxtrace_queues__free(queues);
}
static void intel_pt_free(struct perf_session *session)
{
        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
                                           auxtrace);

        auxtrace_heap__free(&pt->heap);
        intel_pt_free_events(session);
        session->auxtrace = NULL;
        thread__put(pt->unknown_thread);
        addr_filters__exit(&pt->filts);
        zfree(&pt->filter);
        free(pt);
}
static int intel_pt_process_auxtrace_event(struct perf_session *session,
                                           union perf_event *event,
                                           struct perf_tool *tool __maybe_unused)
{
        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
                                           auxtrace);

        if (pt->sampling_mode)
                return 0;

        if (!pt->data_queued) {
                struct auxtrace_buffer *buffer;
                off_t data_offset;
                int fd = perf_data_file__fd(session->file);
                int err;

                if (perf_data_file__is_pipe(session->file)) {
                        data_offset = 0;
                } else {
                        data_offset = lseek(fd, 0, SEEK_CUR);
                        if (data_offset == -1)
                                return -errno;
                }

                err = auxtrace_queues__add_event(&pt->queues, session, event,
                                                 data_offset, &buffer);
                if (err)
                        return err;

                /* Dump here now we have copied a piped trace out of the pipe */
                if (dump_trace) {
                        if (auxtrace_buffer__get_data(buffer, fd)) {
                                intel_pt_dump_event(pt, buffer->data,
                                                    buffer->size);
                                auxtrace_buffer__put_data(buffer);
                        }
                }
        }

        return 0;
}
struct intel_pt_synth {
        struct perf_tool dummy_tool;
        struct perf_session *session;
};
static int intel_pt_event_synth(struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample __maybe_unused,
                                struct machine *machine __maybe_unused)
{
        struct intel_pt_synth *intel_pt_synth =
                        container_of(tool, struct intel_pt_synth, dummy_tool);

        return perf_session__deliver_synth_event(intel_pt_synth->session, event,
                                                 NULL);
}
static int intel_pt_synth_event(struct perf_session *session, const char *name,
                                struct perf_event_attr *attr, u64 id)
{
        struct intel_pt_synth intel_pt_synth;
        int err;

        pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
                 name, id, (u64)attr->sample_type);

        memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
        intel_pt_synth.session = session;

        err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
                                          &id, intel_pt_event_synth);
        if (err)
                pr_err("%s: failed to synthesize '%s' event type\n",
                       __func__, name);

        return err;
}
static void intel_pt_set_event_name(struct perf_evlist *evlist, u64 id,
                                    const char *name)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->id && evsel->id[0] == id) {
                        if (evsel->name)
                                zfree(&evsel->name);
                        evsel->name = strdup(name);
                        break;
                }
        }
}
static struct perf_evsel *intel_pt_evsel(struct intel_pt *pt,
                                         struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.type == pt->pmu_type && evsel->ids)
                        return evsel;
        }

        return NULL;
}
static int intel_pt_synth_events(struct intel_pt *pt,
                                 struct perf_session *session)
{
        struct perf_evlist *evlist = session->evlist;
        struct perf_evsel *evsel = intel_pt_evsel(pt, evlist);
        struct perf_event_attr attr;
        u64 id;
        int err;

        if (!evsel) {
                pr_debug("There are no selected events with Intel Processor Trace data\n");
                return 0;
        }

        memset(&attr, 0, sizeof(struct perf_event_attr));
        attr.size = sizeof(struct perf_event_attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
        attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
                            PERF_SAMPLE_PERIOD;
        if (pt->timeless_decoding)
                attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
        else
                attr.sample_type |= PERF_SAMPLE_TIME;
        if (!pt->per_cpu_mmaps)
                attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
        attr.exclude_user = evsel->attr.exclude_user;
        attr.exclude_kernel = evsel->attr.exclude_kernel;
        attr.exclude_hv = evsel->attr.exclude_hv;
        attr.exclude_host = evsel->attr.exclude_host;
        attr.exclude_guest = evsel->attr.exclude_guest;
        attr.sample_id_all = evsel->attr.sample_id_all;
        attr.read_format = evsel->attr.read_format;

        id = evsel->id[0] + 1000000000;
        if (!id)
                id = 1;

        if (pt->synth_opts.branches) {
                attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
                attr.sample_period = 1;
                attr.sample_type |= PERF_SAMPLE_ADDR;
                err = intel_pt_synth_event(session, "branches", &attr, id);
                if (err)
                        return err;
                pt->sample_branches = true;
                pt->branches_sample_type = attr.sample_type;
                pt->branches_id = id;
                id += 1;
                attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
        }

        if (pt->synth_opts.callchain)
                attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
        if (pt->synth_opts.last_branch)
                attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;

        if (pt->synth_opts.instructions) {
                attr.config = PERF_COUNT_HW_INSTRUCTIONS;
                if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
                        attr.sample_period =
                                intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
                else
                        attr.sample_period = pt->synth_opts.period;
                err = intel_pt_synth_event(session, "instructions", &attr, id);
                if (err)
                        return err;
                pt->sample_instructions = true;
                pt->instructions_sample_type = attr.sample_type;
                pt->instructions_id = id;
                id += 1;
        }

        attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
        attr.sample_period = 1;

        if (pt->synth_opts.transactions) {
                attr.config = PERF_COUNT_HW_INSTRUCTIONS;
                err = intel_pt_synth_event(session, "transactions", &attr, id);
                if (err)
                        return err;
                pt->sample_transactions = true;
                pt->transactions_sample_type = attr.sample_type;
                pt->transactions_id = id;
                intel_pt_set_event_name(evlist, id, "transactions");
                id += 1;
        }

        attr.type = PERF_TYPE_SYNTH;
        attr.sample_type |= PERF_SAMPLE_RAW;

        if (pt->synth_opts.ptwrites) {
                attr.config = PERF_SYNTH_INTEL_PTWRITE;
                err = intel_pt_synth_event(session, "ptwrite", &attr, id);
                if (err)
                        return err;
                pt->sample_ptwrites = true;
                pt->ptwrites_sample_type = attr.sample_type;
                pt->ptwrites_id = id;
                intel_pt_set_event_name(evlist, id, "ptwrite");
                id += 1;
        }

        if (pt->synth_opts.pwr_events) {
                pt->sample_pwr_events = true;
                pt->pwr_events_sample_type = attr.sample_type;

                attr.config = PERF_SYNTH_INTEL_CBR;
                err = intel_pt_synth_event(session, "cbr", &attr, id);
                if (err)
                        return err;
                pt->cbr_id = id;
                intel_pt_set_event_name(evlist, id, "cbr");
                id += 1;
        }

        if (pt->synth_opts.pwr_events && (evsel->attr.config & 0x10)) {
                attr.config = PERF_SYNTH_INTEL_MWAIT;
                err = intel_pt_synth_event(session, "mwait", &attr, id);
                if (err)
                        return err;
                pt->mwait_id = id;
                intel_pt_set_event_name(evlist, id, "mwait");
                id += 1;

                attr.config = PERF_SYNTH_INTEL_PWRE;
                err = intel_pt_synth_event(session, "pwre", &attr, id);
                if (err)
                        return err;
                pt->pwre_id = id;
                intel_pt_set_event_name(evlist, id, "pwre");
                id += 1;

                attr.config = PERF_SYNTH_INTEL_EXSTOP;
                err = intel_pt_synth_event(session, "exstop", &attr, id);
                if (err)
                        return err;
                pt->exstop_id = id;
                intel_pt_set_event_name(evlist, id, "exstop");
                id += 1;

                attr.config = PERF_SYNTH_INTEL_PWRX;
                err = intel_pt_synth_event(session, "pwrx", &attr, id);
                if (err)
                        return err;
                pt->pwrx_id = id;
                intel_pt_set_event_name(evlist, id, "pwrx");
                id += 1;
        }

        pt->synth_needs_swap = evsel->needs_swap;

        return 0;
}
static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry_reverse(evlist, evsel) {
                const char *name = perf_evsel__name(evsel);

                if (!strcmp(name, "sched:sched_switch"))
                        return evsel;
        }

        return NULL;
}
static bool intel_pt_find_switch(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.context_switch)
                        return true;
        }

        return false;
}
static int intel_pt_perf_config(const char *var, const char *value, void *data)
{
        struct intel_pt *pt = data;

        if (!strcmp(var, "intel-pt.mispred-all"))
                pt->mispred_all = perf_config_bool(var, value);

        return 0;
}
static const char * const intel_pt_info_fmts[] = {
        [INTEL_PT_PMU_TYPE]             = "  PMU Type            %"PRId64"\n",
        [INTEL_PT_TIME_SHIFT]           = "  Time Shift          %"PRIu64"\n",
        [INTEL_PT_TIME_MULT]            = "  Time Multiplier     %"PRIu64"\n",
        [INTEL_PT_TIME_ZERO]            = "  Time Zero           %"PRIu64"\n",
        [INTEL_PT_CAP_USER_TIME_ZERO]   = "  Cap Time Zero       %"PRId64"\n",
        [INTEL_PT_TSC_BIT]              = "  TSC bit             %#"PRIx64"\n",
        [INTEL_PT_NORETCOMP_BIT]        = "  NoRETComp bit       %#"PRIx64"\n",
        [INTEL_PT_HAVE_SCHED_SWITCH]    = "  Have sched_switch   %"PRId64"\n",
        [INTEL_PT_SNAPSHOT_MODE]        = "  Snapshot mode       %"PRId64"\n",
        [INTEL_PT_PER_CPU_MMAPS]        = "  Per-cpu maps        %"PRId64"\n",
        [INTEL_PT_MTC_BIT]              = "  MTC bit             %#"PRIx64"\n",
        [INTEL_PT_TSC_CTC_N]            = "  TSC:CTC numerator   %"PRIu64"\n",
        [INTEL_PT_TSC_CTC_D]            = "  TSC:CTC denominator %"PRIu64"\n",
        [INTEL_PT_CYC_BIT]              = "  CYC bit             %#"PRIx64"\n",
        [INTEL_PT_MAX_NONTURBO_RATIO]   = "  Max non-turbo ratio %"PRIu64"\n",
        [INTEL_PT_FILTER_STR_LEN]       = "  Filter string len.  %"PRIu64"\n",
};
static void intel_pt_print_info(u64 *arr, int start, int finish)
{
        int i;

        if (!dump_trace)
                return;

        for (i = start; i <= finish; i++)
                fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
}
static void intel_pt_print_info_str(const char *name, const char *str)
{
        if (!dump_trace)
                return;

        fprintf(stdout, "  %-20s%s\n", name, str ? str : "");
}
static bool intel_pt_has(struct auxtrace_info_event *auxtrace_info, int pos)
{
        return auxtrace_info->header.size >=
                sizeof(struct auxtrace_info_event) + (sizeof(u64) * (pos + 1));
}
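
/*
 * Example (index value from the matching header; treat the number as
 * illustrative): with INTEL_PT_CYC_BIT at priv index 14, intel_pt_has()
 * requires the info event to carry at least 15 u64 priv words after the
 * fixed header, so perf.data files written before a field existed simply
 * fail the test.
 */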
int intel_pt_process_auxtrace_info(union perf_event *event,
                                   struct perf_session *session)
{
        struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
        size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
        struct intel_pt *pt;
        void *info_end;
        u64 *info;
        int err;

        if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
                                        min_sz)
                return -EINVAL;

        pt = zalloc(sizeof(struct intel_pt));
        if (!pt)
                return -ENOMEM;

        addr_filters__init(&pt->filts);

        err = perf_config(intel_pt_perf_config, pt);
        if (err)
                goto err_free;

        err = auxtrace_queues__init(&pt->queues);
        if (err)
                goto err_free;

        intel_pt_log_set_name(INTEL_PT_PMU_NAME);

        pt->session = session;
        pt->machine = &session->machines.host; /* No kvm support */
        pt->auxtrace_type = auxtrace_info->type;
        pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
        pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
        pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
        pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
        pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
        pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
        pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
        pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
        pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
        pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
        intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
                            INTEL_PT_PER_CPU_MMAPS);

        if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
                pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
                pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
                pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
                pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
                pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
                intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
                                    INTEL_PT_CYC_BIT);
        }

        if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
                pt->max_non_turbo_ratio =
                        auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
                intel_pt_print_info(&auxtrace_info->priv[0],
                                    INTEL_PT_MAX_NONTURBO_RATIO,
                                    INTEL_PT_MAX_NONTURBO_RATIO);
        }

        info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
        info_end = (void *)info + auxtrace_info->header.size;

        if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
                size_t len;

                len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
                intel_pt_print_info(&auxtrace_info->priv[0],
                                    INTEL_PT_FILTER_STR_LEN,
                                    INTEL_PT_FILTER_STR_LEN);
                if (len) {
                        const char *filter = (const char *)info;

                        len = roundup(len + 1, 8);
                        info += len >> 3;
                        if ((void *)info > info_end) {
                                pr_err("%s: bad filter string length\n", __func__);
                                err = -EINVAL;
                                goto err_free_queues;
                        }
                        pt->filter = memdup(filter, len);
                        if (!pt->filter) {
                                err = -ENOMEM;
                                goto err_free_queues;
                        }
                        if (session->header.needs_swap)
                                mem_bswap_64(pt->filter, len);
                        if (pt->filter[len - 1]) {
                                pr_err("%s: filter string not null terminated\n", __func__);
                                err = -EINVAL;
                                goto err_free_queues;
                        }
                        err = addr_filters__parse_bare_filter(&pt->filts,
                                                              filter);
                        if (err)
                                goto err_free_queues;
                }
                intel_pt_print_info_str("Filter string", pt->filter);
        }

        pt->timeless_decoding = intel_pt_timeless_decoding(pt);
        if (pt->timeless_decoding && !pt->tc.time_mult)
                pt->tc.time_mult = 1;
        pt->have_tsc = intel_pt_have_tsc(pt);
        pt->sampling_mode = false;
        pt->est_tsc = !pt->timeless_decoding;

        pt->unknown_thread = thread__new(999999999, 999999999);
        if (!pt->unknown_thread) {
                err = -ENOMEM;
                goto err_free_queues;
        }

        /*
         * Since this thread will not be kept in any rbtree nor in a
         * list, initialize its list node so that at thread__put() the
         * current thread lifetime assumption is kept and we don't segfault
         * at list_del_init().
         */
        INIT_LIST_HEAD(&pt->unknown_thread->node);

        err = thread__set_comm(pt->unknown_thread, "unknown", 0);
        if (err)
                goto err_delete_thread;
        if (thread__init_map_groups(pt->unknown_thread, pt->machine)) {
                err = -ENOMEM;
                goto err_delete_thread;
        }

        pt->auxtrace.process_event = intel_pt_process_event;
        pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
        pt->auxtrace.flush_events = intel_pt_flush;
        pt->auxtrace.free_events = intel_pt_free_events;
        pt->auxtrace.free = intel_pt_free;
        session->auxtrace = &pt->auxtrace;

        if (dump_trace)
                return 0;

        if (pt->have_sched_switch == 1) {
                pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
                if (!pt->switch_evsel) {
                        pr_err("%s: missing sched_switch event\n", __func__);
                        err = -EINVAL;
                        goto err_delete_thread;
                }
        } else if (pt->have_sched_switch == 2 &&
                   !intel_pt_find_switch(session->evlist)) {
                pr_err("%s: missing context_switch attribute flag\n", __func__);
                err = -EINVAL;
                goto err_delete_thread;
        }

        if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
                pt->synth_opts = *session->itrace_synth_opts;
        } else {
                itrace_synth_opts__set_default(&pt->synth_opts);
                if (use_browser != -1) {
                        pt->synth_opts.branches = false;
                        pt->synth_opts.callchain = true;
                }
                if (session->itrace_synth_opts)
                        pt->synth_opts.thread_stack =
                                session->itrace_synth_opts->thread_stack;
        }

        if (pt->synth_opts.log)
                intel_pt_log_enable();

        /* Maximum non-turbo ratio is TSC freq / 100 MHz */
        if (pt->tc.time_mult) {
                u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);

                if (!pt->max_non_turbo_ratio)
                        pt->max_non_turbo_ratio =
                                        (tsc_freq + 50000000) / 100000000;
                intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
                intel_pt_log("Maximum non-turbo ratio %u\n",
                             pt->max_non_turbo_ratio);
                pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
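                /*
                 * Worked example (illustrative numbers): a 3.6 GHz TSC gives
                 * tsc_freq = 3600000000, so (3600000000 + 50000000) /
                 * 100000000 = 36, i.e. the ratio in units of 100 MHz rounded
                 * to nearest, and cbr2khz = 3600000000 / 36 / 1000 = 100000.
                 */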
        }

        if (pt->synth_opts.calls)
                pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
                                       PERF_IP_FLAG_TRACE_END;
        if (pt->synth_opts.returns)
                pt->branches_filter |= PERF_IP_FLAG_RETURN |
                                       PERF_IP_FLAG_TRACE_BEGIN;

        if (pt->synth_opts.callchain && !symbol_conf.use_callchain) {
                symbol_conf.use_callchain = true;
                if (callchain_register_param(&callchain_param) < 0) {
                        symbol_conf.use_callchain = false;
                        pt->synth_opts.callchain = false;
                }
        }

        err = intel_pt_synth_events(pt, session);
        if (err)
                goto err_delete_thread;

        err = auxtrace_queues__process_index(&pt->queues, session);
        if (err)
                goto err_delete_thread;

        if (pt->queues.populated)
                pt->data_queued = true;

        if (pt->timeless_decoding)
                pr_debug2("Intel PT decoding without timestamps\n");

        return 0;

err_delete_thread:
        thread__zput(pt->unknown_thread);
err_free_queues:
        intel_pt_log_disable();
        auxtrace_queues__free(&pt->queues);
        session->auxtrace = NULL;
err_free:
        addr_filters__exit(&pt->filts);
        zfree(&pt->filter);
        free(pt);
        return err;
}