// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015-2018 Linaro Limited.
 *
 * Author: Tor Jeremiassen <tor@ti.com>
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>

#include <stdlib.h>

#include "auxtrace.h"
#include "color.h"
#include "cs-etm.h"
#include "cs-etm-decoder/cs-etm-decoder.h"
#include "debug.h"
#include "evlist.h"
#include "intlist.h"
#include "machine.h"
#include "map.h"
#include "perf.h"
#include "thread.h"
#include "thread_map.h"
#include "thread-stack.h"
#include "util.h"

#define MAX_TIMESTAMP (~0ULL)

/*
 * A64 instructions are always 4 bytes
 *
 * Only A64 is supported, so can use this constant for converting between
 * addresses and instruction counts, calculating offsets etc
 */
#define A64_INSTR_SIZE 4

struct cs_etm_auxtrace {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	struct itrace_synth_opts synth_opts;
	struct perf_session *session;
	struct machine *machine;
	struct thread *unknown_thread;

	u8 timeless_decoding;
	u8 snapshot_mode;
	u8 data_queued;
	u8 sample_branches;
	u8 sample_instructions;

	int num_cpu;
	u32 auxtrace_type;
	u64 branches_sample_type;
	u64 branches_id;
	u64 instructions_sample_type;
	u64 instructions_sample_period;
	u64 instructions_id;
	u64 **metadata;
	u64 kernel_start;
	unsigned int pmu_type;
};

struct cs_etm_queue {
	struct cs_etm_auxtrace *etm;
	struct thread *thread;
	struct cs_etm_decoder *decoder;
	struct auxtrace_buffer *buffer;
	const struct cs_etm_state *state;
	union perf_event *event_buf;
	unsigned int queue_nr;
	pid_t pid, tid;
	int cpu;
	u64 offset;
	u64 time;
	u64 period_instructions;
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	size_t last_branch_pos;
	struct cs_etm_packet *prev_packet;
	struct cs_etm_packet *packet;
};

/* RB tree for quick conversion between traceID and metadata pointers */
static struct intlist *traceid_list;

static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
					   pid_t tid, u64 time_);

int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
{
	struct int_node *inode;
	u64 *metadata;

	inode = intlist__find(traceid_list, trace_chan_id);
	if (!inode)
		return -EINVAL;

	metadata = inode->priv;
	*cpu = (int)metadata[CS_ETM_CPU];

	return 0;
}

static void cs_etm__packet_dump(const char *pkt_string)
{
	const char *color = PERF_COLOR_BLUE;
	int len = strlen(pkt_string);

	if (len && (pkt_string[len-1] == '\n'))
		color_fprintf(stdout, color, " %s", pkt_string);
	else
		color_fprintf(stdout, color, " %s\n", pkt_string);

	fflush(stdout);
}

static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
			       struct auxtrace_buffer *buffer)
{
	int i, ret;
	const char *color = PERF_COLOR_BLUE;
	struct cs_etm_decoder_params d_params;
	struct cs_etm_trace_params *t_params;
	struct cs_etm_decoder *decoder;
	size_t buffer_used = 0;

	fprintf(stdout, "\n");
	color_fprintf(stdout, color,
		      ". ... CoreSight ETM Trace data: size %zu bytes\n",
		      buffer->size);

	/* Use metadata to fill in trace parameters for trace decoder */
	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
	if (!t_params)
		return;

	for (i = 0; i < etm->num_cpu; i++) {
		t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
		t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
		t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
		t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
		t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
		t_params[i].etmv4.reg_configr =
					etm->metadata[i][CS_ETMV4_TRCCONFIGR];
		t_params[i].etmv4.reg_traceidr =
					etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
	}

	/* Set decoder parameters to simply print the trace packets */
	d_params.packet_printer = cs_etm__packet_dump;
	d_params.operation = CS_ETM_OPERATION_PRINT;
	d_params.formatted = true;
	d_params.fsyncs = false;
	d_params.hsyncs = false;
	d_params.frame_aligned = true;

	decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);

	zfree(&t_params);

	if (!decoder)
		return;
	do {
		size_t consumed;

		ret = cs_etm_decoder__process_data_block(
				decoder, buffer->offset,
				&((u8 *)buffer->data)[buffer_used],
				buffer->size - buffer_used, &consumed);
		if (ret)
			break;

		buffer_used += consumed;
	} while (buffer_used < buffer->size);

	cs_etm_decoder__free(decoder);
}

static int cs_etm__flush_events(struct perf_session *session,
				struct perf_tool *tool)
{
	int ret;
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	if (!etm->timeless_decoding)
		return -EINVAL;

	ret = cs_etm__update_queues(etm);

	if (ret < 0)
		return ret;

	return cs_etm__process_timeless_queues(etm, -1, MAX_TIMESTAMP - 1);
}

static void cs_etm__free_queue(void *priv)
{
	struct cs_etm_queue *etmq = priv;

	if (!etmq)
		return;

	thread__zput(etmq->thread);
	cs_etm_decoder__free(etmq->decoder);
	zfree(&etmq->event_buf);
	zfree(&etmq->last_branch);
	zfree(&etmq->last_branch_rb);
	zfree(&etmq->prev_packet);
	zfree(&etmq->packet);
	free(etmq);
}

static void cs_etm__free_events(struct perf_session *session)
{
	unsigned int i;
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	struct auxtrace_queues *queues = &aux->queues;

	for (i = 0; i < queues->nr_queues; i++) {
		cs_etm__free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}

	auxtrace_queues__free(queues);
}

static void cs_etm__free(struct perf_session *session)
{
	int i;
	struct int_node *inode, *tmp;
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	cs_etm__free_events(session);
	session->auxtrace = NULL;

	/* First remove all traceID/metadata nodes for the RB tree */
	intlist__for_each_entry_safe(inode, tmp, traceid_list)
		intlist__remove(traceid_list, inode);
	/* Then the RB tree itself */
	intlist__delete(traceid_list);

	for (i = 0; i < aux->num_cpu; i++)
		zfree(&aux->metadata[i]);

	thread__zput(aux->unknown_thread);
	zfree(&aux->metadata);
	free(aux);
}

static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address)
{
	struct machine *machine;

	machine = etmq->etm->machine;

	if (address >= etmq->etm->kernel_start) {
		if (machine__is_host(machine))
			return PERF_RECORD_MISC_KERNEL;
		else
			return PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (machine__is_host(machine))
			return PERF_RECORD_MISC_USER;
		else if (perf_guest)
			return PERF_RECORD_MISC_GUEST_USER;
		else
			return PERF_RECORD_MISC_HYPERVISOR;
	}
}

static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
			      size_t size, u8 *buffer)
{
	u8 cpumode;
	u64 offset;
	int len;
	struct thread *thread;
	struct machine *machine;
	struct addr_location al;

	/* Return 0 bytes read on any failure: the function returns a size */
	if (!etmq)
		return 0;

	machine = etmq->etm->machine;
	cpumode = cs_etm__cpu_mode(etmq, address);

	thread = etmq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return 0;
		thread = etmq->etm->unknown_thread;
	}

	if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso)
		return 0;

	if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
	    dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
		return 0;

	offset = al.map->map_ip(al.map, address);

	map__load(al.map);

	len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);

	if (len <= 0)
		return 0;

	return len;
}

static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
						unsigned int queue_nr)
{
	int i;
	struct cs_etm_decoder_params d_params;
	struct cs_etm_trace_params *t_params;
	struct cs_etm_queue *etmq;
	size_t szp = sizeof(struct cs_etm_packet);

	etmq = zalloc(sizeof(*etmq));
	if (!etmq)
		return NULL;

	etmq->packet = zalloc(szp);
	if (!etmq->packet)
		goto out_free;

	if (etm->synth_opts.last_branch || etm->sample_branches) {
		etmq->prev_packet = zalloc(szp);
		if (!etmq->prev_packet)
			goto out_free;
	}

	if (etm->synth_opts.last_branch) {
		size_t sz = sizeof(struct branch_stack);

		sz += etm->synth_opts.last_branch_sz *
		      sizeof(struct branch_entry);
		etmq->last_branch = zalloc(sz);
		if (!etmq->last_branch)
			goto out_free;
		etmq->last_branch_rb = zalloc(sz);
		if (!etmq->last_branch_rb)
			goto out_free;
	}

	etmq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!etmq->event_buf)
		goto out_free;

	etmq->etm = etm;
	etmq->queue_nr = queue_nr;
	etmq->pid = -1;
	etmq->tid = -1;
	etmq->cpu = -1;

	/* Use metadata to fill in trace parameters for trace decoder */
	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);

	if (!t_params)
		goto out_free;

	for (i = 0; i < etm->num_cpu; i++) {
		t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
		t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
		t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
		t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
		t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
		t_params[i].etmv4.reg_configr =
					etm->metadata[i][CS_ETMV4_TRCCONFIGR];
		t_params[i].etmv4.reg_traceidr =
					etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
	}

	/* Set decoder parameters to decode trace packets */
	d_params.packet_printer = cs_etm__packet_dump;
	d_params.operation = CS_ETM_OPERATION_DECODE;
	d_params.formatted = true;
	d_params.fsyncs = false;
	d_params.hsyncs = false;
	d_params.frame_aligned = true;
	d_params.data = etmq;

	etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);

	zfree(&t_params);

	if (!etmq->decoder)
		goto out_free;

	/*
	 * Register a function to handle all memory accesses required by
	 * the trace decoder library.
	 */
	if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
					      0x0L, ((u64) -1L),
					      cs_etm__mem_access))
		goto out_free_decoder;

	etmq->offset = 0;
	etmq->period_instructions = 0;

	return etmq;

out_free_decoder:
	cs_etm_decoder__free(etmq->decoder);
out_free:
	zfree(&etmq->event_buf);
	zfree(&etmq->last_branch);
	zfree(&etmq->last_branch_rb);
	zfree(&etmq->prev_packet);
	zfree(&etmq->packet);
	free(etmq);

	return NULL;
}

static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
			       struct auxtrace_queue *queue,
			       unsigned int queue_nr)
{
	struct cs_etm_queue *etmq = queue->priv;

	if (list_empty(&queue->head) || etmq)
		return 0;

	etmq = cs_etm__alloc_queue(etm, queue_nr);

	if (!etmq)
		return -ENOMEM;

	queue->priv = etmq;

	if (queue->cpu != -1)
		etmq->cpu = queue->cpu;

	etmq->tid = queue->tid;

	return 0;
}

static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
{
	unsigned int i;
	int ret;

	for (i = 0; i < etm->queues.nr_queues; i++) {
		ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
		if (ret)
			return ret;
	}

	return 0;
}

static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
{
	if (etm->queues.new_data) {
		etm->queues.new_data = false;
		return cs_etm__setup_queues(etm);
	}

	return 0;
}

static inline void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq)
{
	struct branch_stack *bs_src = etmq->last_branch_rb;
	struct branch_stack *bs_dst = etmq->last_branch;
	size_t nr = 0;

	/*
	 * Set the number of records before early exit: ->nr is used to
	 * determine how many branches to copy from ->entries.
	 */
	bs_dst->nr = bs_src->nr;

	/*
	 * Early exit when there is nothing to copy.
	 */
	if (!bs_src->nr)
		return;

	/*
	 * As bs_src->entries is a circular buffer, we need to copy from it in
	 * two steps.  First, copy the branches from the most recently inserted
	 * branch ->last_branch_pos until the end of bs_src->entries buffer.
	 */
	nr = etmq->etm->synth_opts.last_branch_sz - etmq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[etmq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	/*
	 * If we wrapped around at least once, the branches from the beginning
	 * of the bs_src->entries buffer and until the ->last_branch_pos element
	 * are older valid branches: copy them over.  The total number of
	 * branches copied over will be equal to the number of branches asked by
	 * the user in last_branch_sz.
	 */
	if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * etmq->last_branch_pos);
	}
}
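
/*
 * Illustrative example (not part of the original sources): assume
 * last_branch_sz = 4, last_branch_pos = 1 and bs_src->nr = 4, i.e. the
 * ring has wrapped at least once.  Step one copies bs_src->entries[1..3]
 * (the newest three records) into bs_dst->entries[0..2]; step two copies
 * bs_src->entries[0] (the oldest record) into bs_dst->entries[3].  The
 * flat copy therefore lists all four branches newest-first, which is the
 * ordering perf expects in a branch stack.
 */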

static inline void cs_etm__reset_last_branch_rb(struct cs_etm_queue *etmq)
{
	etmq->last_branch_pos = 0;
	etmq->last_branch_rb->nr = 0;
}

static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet)
{
	/* Returns 0 for the CS_ETM_TRACE_ON packet */
	if (packet->sample_type == CS_ETM_TRACE_ON)
		return 0;

	/*
	 * The packet records the execution range with an exclusive end address
	 *
	 * A64 instructions are constant size, so the last executed
	 * instruction is A64_INSTR_SIZE before the end address
	 * Will need to do instruction level decode for T32 instructions as
	 * they can be variable size (not yet supported).
	 */
	return packet->end_addr - A64_INSTR_SIZE;
}

static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
{
	/* Returns 0 for the CS_ETM_TRACE_ON packet */
	if (packet->sample_type == CS_ETM_TRACE_ON)
		return 0;

	return packet->start_addr;
}

static inline u64 cs_etm__instr_count(const struct cs_etm_packet *packet)
{
	/*
	 * Only A64 instructions are currently supported, so can get
	 * instruction count by dividing.
	 * Will need to do instruction level decode for T32 instructions as
	 * they can be variable size (not yet supported).
	 */
	return (packet->end_addr - packet->start_addr) / A64_INSTR_SIZE;
}

static inline u64 cs_etm__instr_addr(const struct cs_etm_packet *packet,
				     u64 offset)
{
	/*
	 * Only A64 instructions are currently supported, so can get
	 * instruction address by multiplying.
	 * Will need to do instruction level decode for T32 instructions as
	 * they can be variable size (not yet supported).
	 */
	return packet->start_addr + offset * A64_INSTR_SIZE;
}
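
/*
 * Worked example (illustrative, not from the original sources): a range
 * packet with start_addr = 0x1000 and end_addr = 0x1010 (exclusive)
 * covers (0x1010 - 0x1000) / A64_INSTR_SIZE = 4 instructions.  For it,
 * cs_etm__first_executed_instr() returns 0x1000,
 * cs_etm__last_executed_instr() returns 0x1010 - 4 = 0x100c, and
 * cs_etm__instr_addr(packet, 2) returns 0x1000 + 2 * 4 = 0x1008.
 */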

static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq)
{
	struct branch_stack *bs = etmq->last_branch_rb;
	struct branch_entry *be;

	/*
	 * The branches are recorded in a circular buffer in reverse
	 * chronological order: we start recording from the last element of the
	 * buffer down.  After writing the first element of the stack, move the
	 * insert position back to the end of the buffer.
	 */
	if (!etmq->last_branch_pos)
		etmq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;

	etmq->last_branch_pos -= 1;

	be = &bs->entries[etmq->last_branch_pos];
	be->from = cs_etm__last_executed_instr(etmq->prev_packet);
	be->to = cs_etm__first_executed_instr(etmq->packet);
	/* No support for mispredict */
	be->flags.mispred = 0;
	be->flags.predicted = 1;

	/*
	 * Increment bs->nr until reaching the number of last branches asked by
	 * the user on the command line.
	 */
	if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
		bs->nr += 1;
}
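
/*
 * Illustrative note (not from the original sources): with
 * last_branch_sz = 4, successive calls write bs->entries[3], [2], [1],
 * [0], then wrap back to [3].  ->last_branch_pos therefore always names
 * the slot holding the newest branch, and bs->nr saturates at 4 once the
 * ring is full; cs_etm__copy_last_branch_rb() above relies on both
 * invariants when it linearizes the ring.
 */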

static int cs_etm__inject_event(union perf_event *event,
				struct perf_sample *sample, u64 type)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample);
}

static int
cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
{
	struct auxtrace_buffer *aux_buffer = etmq->buffer;
	struct auxtrace_buffer *old_buffer = aux_buffer;
	struct auxtrace_queue *queue;

	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];

	aux_buffer = auxtrace_buffer__next(queue, aux_buffer);

	/* If no more data, drop the previous auxtrace_buffer and return */
	if (!aux_buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		buff->len = 0;
		return 0;
	}

	etmq->buffer = aux_buffer;

	/* If the aux_buffer doesn't have data associated, try to load it */
	if (!aux_buffer->data) {
		/* get the file desc associated with the perf data file */
		int fd = perf_data__fd(etmq->etm->session->data);

		aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
		if (!aux_buffer->data)
			return -ENOMEM;
	}

	/* If valid, drop the previous buffer */
	if (old_buffer)
		auxtrace_buffer__drop_data(old_buffer);

	buff->offset = aux_buffer->offset;
	buff->len = aux_buffer->size;
	buff->buf = aux_buffer->data;

	buff->ref_timestamp = aux_buffer->reference;

	return buff->len;
}

static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
				    struct auxtrace_queue *queue)
{
	struct cs_etm_queue *etmq = queue->priv;

	/* CPU-wide tracing isn't supported yet */
	if (queue->tid == -1)
		return;

	if ((!etmq->thread) && (etmq->tid != -1))
		etmq->thread = machine__find_thread(etm->machine, -1,
						    etmq->tid);

	if (etmq->thread) {
		etmq->pid = etmq->thread->pid_;
		if (queue->cpu == -1)
			etmq->cpu = etmq->thread->cpu;
	}
}

static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
					    u64 addr, u64 period)
{
	int ret = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	union perf_event *event = etmq->event_buf;
	struct perf_sample sample = {.ip = 0,};

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
	event->sample.header.size = sizeof(struct perf_event_header);

	sample.ip = addr;
	sample.pid = etmq->pid;
	sample.tid = etmq->tid;
	sample.id = etmq->etm->instructions_id;
	sample.stream_id = etmq->etm->instructions_id;
	sample.period = period;
	sample.cpu = etmq->packet->cpu;
	sample.flags = 0;
	sample.insn_len = 1;
	sample.cpumode = event->sample.header.misc;

	if (etm->synth_opts.last_branch) {
		cs_etm__copy_last_branch_rb(etmq);
		sample.branch_stack = etmq->last_branch;
	}

	if (etm->synth_opts.inject) {
		ret = cs_etm__inject_event(event, &sample,
					   etm->instructions_sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(etm->session, event, &sample);

	if (ret)
		pr_err(
		"CS ETM Trace: failed to deliver instruction event, error %d\n",
			ret);

	if (etm->synth_opts.last_branch)
		cs_etm__reset_last_branch_rb(etmq);

	return ret;
}

/*
 * The cs etm packet encodes an instruction range between a branch target
 * and the next taken branch. Generate sample accordingly.
 */
static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq)
{
	int ret = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct perf_sample sample = {.ip = 0,};
	union perf_event *event = etmq->event_buf;
	struct dummy_branch_stack {
		u64			nr;
		struct branch_entry	entries;
	} dummy_bs;
	u64 ip;

	ip = cs_etm__last_executed_instr(etmq->prev_packet);

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
	event->sample.header.size = sizeof(struct perf_event_header);

	sample.ip = ip;
	sample.pid = etmq->pid;
	sample.tid = etmq->tid;
	sample.addr = cs_etm__first_executed_instr(etmq->packet);
	sample.id = etmq->etm->branches_id;
	sample.stream_id = etmq->etm->branches_id;
	sample.period = 1;
	sample.cpu = etmq->packet->cpu;
	sample.flags = 0;
	sample.cpumode = event->sample.header.misc;

	/*
	 * perf report cannot handle events without a branch stack
	 */
	if (etm->synth_opts.last_branch) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	if (etm->synth_opts.inject) {
		ret = cs_etm__inject_event(event, &sample,
					   etm->branches_sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(etm->session, event, &sample);

	if (ret)
		pr_err(
		"CS ETM Trace: failed to deliver branch event, error %d\n",
			ret);

	return ret;
}

struct cs_etm_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int cs_etm__event_synth(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine __maybe_unused)
{
	struct cs_etm_synth *cs_etm_synth =
		      container_of(tool, struct cs_etm_synth, dummy_tool);

	return perf_session__deliver_synth_event(cs_etm_synth->session,
						 event, NULL);
}

static int cs_etm__synth_event(struct perf_session *session,
			       struct perf_event_attr *attr, u64 id)
{
	struct cs_etm_synth cs_etm_synth;

	memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
	cs_etm_synth.session = session;

	return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
					   &id, cs_etm__event_synth);
}

static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
				struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == etm->pmu_type) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("No selected events with CoreSight Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (etm->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;

	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	/* create new id val to be a fixed offset from evsel id */
	id = evsel->id[0] + 1000000000;

	if (!id)
		id = 1;

	if (etm->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->sample_branches = true;
		etm->branches_sample_type = attr.sample_type;
		etm->branches_id = id;
		id += 1;
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (etm->synth_opts.last_branch)
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;

	if (etm->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = etm->synth_opts.period;
		etm->instructions_sample_period = attr.sample_period;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->sample_instructions = true;
		etm->instructions_sample_type = attr.sample_type;
		etm->instructions_id = id;
		id += 1;
	}

	return 0;
}
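
/*
 * Illustrative note (not from the original sources): the synthesized
 * events take sample ids at a fixed offset from the first id of the
 * traced event, so if evsel->id[0] were 42 the branch samples would
 * carry id 1000000042 and the instruction samples id 1000000043.  The
 * offset only has to keep the synthetic ids from colliding with the
 * ids of real events in the session.
 */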

static int cs_etm__sample(struct cs_etm_queue *etmq)
{
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct cs_etm_packet *tmp;
	int ret;
	u64 instrs_executed;

	instrs_executed = cs_etm__instr_count(etmq->packet);
	etmq->period_instructions += instrs_executed;

	/*
	 * Record a branch when the last instruction in
	 * PREV_PACKET is a branch.
	 */
	if (etm->synth_opts.last_branch &&
	    etmq->prev_packet &&
	    etmq->prev_packet->sample_type == CS_ETM_RANGE &&
	    etmq->prev_packet->last_instr_taken_branch)
		cs_etm__update_last_branch_rb(etmq);

	if (etm->sample_instructions &&
	    etmq->period_instructions >= etm->instructions_sample_period) {
		/*
		 * Emit instruction sample periodically
		 * TODO: allow period to be defined in cycles and clock time
		 */

		/* Get number of instructions executed after the sample point */
		u64 instrs_over = etmq->period_instructions -
			etm->instructions_sample_period;

		/*
		 * Calculate the address of the sampled instruction (-1 as
		 * sample is reported as though instruction has just been
		 * executed, but PC has not advanced to next instruction)
		 */
		u64 offset = (instrs_executed - instrs_over - 1);
		u64 addr = cs_etm__instr_addr(etmq->packet, offset);

		ret = cs_etm__synth_instruction_sample(
			etmq, addr, etm->instructions_sample_period);
		if (ret)
			return ret;

		/* Carry remaining instructions into next sample period */
		etmq->period_instructions = instrs_over;
	}

	if (etm->sample_branches && etmq->prev_packet) {
		bool generate_sample = false;

		/* Generate sample for tracing on packet */
		if (etmq->prev_packet->sample_type == CS_ETM_TRACE_ON)
			generate_sample = true;

		/* Generate sample for branch taken packet */
		if (etmq->prev_packet->sample_type == CS_ETM_RANGE &&
		    etmq->prev_packet->last_instr_taken_branch)
			generate_sample = true;

		if (generate_sample) {
			ret = cs_etm__synth_branch_sample(etmq);
			if (ret)
				return ret;
		}
	}

	if (etm->sample_branches || etm->synth_opts.last_branch) {
		/*
		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
		 * the next incoming packet.
		 */
		tmp = etmq->packet;
		etmq->packet = etmq->prev_packet;
		etmq->prev_packet = tmp;
	}

	return 0;
}
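
/*
 * Worked example (illustrative, not from the original sources): with a
 * sample period of 10000 instructions, suppose period_instructions is
 * 9900 when a range packet of 600 instructions arrives.  The counter
 * reaches 10500, so instrs_over = 10500 - 10000 = 500 and the sampled
 * instruction sits at offset 600 - 500 - 1 = 99 into the packet, i.e.
 * the instruction that made the counter cross the period.  The 500
 * leftover instructions are carried into the next sample period.
 */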

static int cs_etm__flush(struct cs_etm_queue *etmq)
{
	int err = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct cs_etm_packet *tmp;

	if (!etmq->prev_packet)
		return 0;

	/* Handle start tracing packet */
	if (etmq->prev_packet->sample_type == CS_ETM_EMPTY)
		goto swap_packet;

	if (etmq->etm->synth_opts.last_branch &&
	    etmq->prev_packet->sample_type == CS_ETM_RANGE) {
		/*
		 * Generate a last branch event for the branches left in the
		 * circular buffer at the end of the trace.
		 *
		 * Use the address of the end of the last reported execution
		 * range
		 */
		u64 addr = cs_etm__last_executed_instr(etmq->prev_packet);

		err = cs_etm__synth_instruction_sample(
			etmq, addr,
			etmq->period_instructions);
		if (err)
			return err;

		etmq->period_instructions = 0;
	}

	if (etm->sample_branches &&
	    etmq->prev_packet->sample_type == CS_ETM_RANGE) {
		err = cs_etm__synth_branch_sample(etmq);
		if (err)
			return err;
	}

swap_packet:
	if (etm->sample_branches || etm->synth_opts.last_branch) {
		/*
		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
		 * the next incoming packet.
		 */
		tmp = etmq->packet;
		etmq->packet = etmq->prev_packet;
		etmq->prev_packet = tmp;
	}

	return err;
}

static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
{
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct cs_etm_buffer buffer;
	size_t buffer_used, processed;
	int err = 0;

	if (!etm->kernel_start)
		etm->kernel_start = machine__kernel_start(etm->machine);

	/* Go through each buffer in the queue and decode them one by one */
	while (1) {
		buffer_used = 0;
		memset(&buffer, 0, sizeof(buffer));
		err = cs_etm__get_trace(&buffer, etmq);
		if (err <= 0)
			return err;
		/*
		 * We cannot assume consecutive blocks in the data file are
		 * contiguous, reset the decoder to force re-sync.
		 */
		err = cs_etm_decoder__reset(etmq->decoder);
		if (err != 0)
			return err;

		/* Run trace decoder until buffer consumed or end of trace */
		do {
			processed = 0;
			err = cs_etm_decoder__process_data_block(
				etmq->decoder,
				etmq->offset,
				&buffer.buf[buffer_used],
				buffer.len - buffer_used,
				&processed);
			if (err)
				return err;

			etmq->offset += processed;
			buffer_used += processed;

			/* Process each packet in this chunk */
			while (1) {
				err = cs_etm_decoder__get_packet(etmq->decoder,
								 etmq->packet);
				if (err <= 0)
					/*
					 * Stop processing this chunk on
					 * end of data or error
					 */
					break;

				switch (etmq->packet->sample_type) {
				case CS_ETM_RANGE:
					/*
					 * If the packet contains an instruction
					 * range, generate instruction sequence
					 * events.
					 */
					cs_etm__sample(etmq);
					break;
				case CS_ETM_TRACE_ON:
					/*
					 * Discontinuity in trace, flush
					 * previous branch stack
					 */
					cs_etm__flush(etmq);
					break;
				case CS_ETM_EMPTY:
					/*
					 * Should not receive empty packet,
					 * report error.
					 */
					pr_err("CS ETM Trace: empty packet\n");
					break;
				default:
					break;
				}
			}
		} while (buffer.len > buffer_used);

		if (err == 0)
			/* Flush any remaining branch stack entries */
			err = cs_etm__flush(etmq);
	}

	return err;
}

static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
					   pid_t tid, u64 time_)
{
	unsigned int i;
	struct auxtrace_queues *queues = &etm->queues;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &etm->queues.queue_array[i];
		struct cs_etm_queue *etmq = queue->priv;

		if (etmq && ((tid == -1) || (etmq->tid == tid))) {
			etmq->time = time_;
			cs_etm__set_pid_tid_cpu(etm, queue);
			cs_etm__run_decoder(etmq);
		}
	}

	return 0;
}

static int cs_etm__process_event(struct perf_session *session,
				 union perf_event *event,
				 struct perf_sample *sample,
				 struct perf_tool *tool)
{
	int err = 0;
	u64 timestamp;
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("CoreSight ETM Trace requires ordered events\n");
		return -EINVAL;
	}

	if (!etm->timeless_decoding)
		return -EINVAL;

	if (sample->time && (sample->time != (u64) -1))
		timestamp = sample->time;
	else
		timestamp = 0;

	if (timestamp || etm->timeless_decoding) {
		err = cs_etm__update_queues(etm);
		if (err)
			return err;
	}

	if (event->header.type == PERF_RECORD_EXIT)
		return cs_etm__process_timeless_queues(etm,
						       event->fork.tid,
						       sample->time);

	return 0;
}

static int cs_etm__process_auxtrace_event(struct perf_session *session,
					  union perf_event *event,
					  struct perf_tool *tool __maybe_unused)
{
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	if (!etm->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t  data_offset;
		int fd = perf_data__fd(session->data);
		bool is_pipe = perf_data__is_pipe(session->data);
		int err;

		if (is_pipe)
			data_offset = 0;
		else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&etm->queues, session,
						 event, data_offset, &buffer);
		if (err)
			return err;

		if (dump_trace)
			if (auxtrace_buffer__get_data(buffer, fd)) {
				cs_etm__dump_event(etm, buffer);
				auxtrace_buffer__put_data(buffer);
			}
	}

	return 0;
}

static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
{
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = etm->session->evlist;
	bool timeless_decoding = true;

	/*
	 * Cycle through the list of events and flag off timeless decoding
	 * if we find one with the time bit set.
	 */
	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.sample_type & PERF_SAMPLE_TIME))
			timeless_decoding = false;
	}

	return timeless_decoding;
}

static const char * const cs_etm_global_header_fmts[] = {
	[CS_HEADER_VERSION_0]	= " Header version %llx\n",
	[CS_PMU_TYPE_CPUS]	= " PMU type/num cpus %llx\n",
	[CS_ETM_SNAPSHOT]	= " Snapshot %llx\n",
};

static const char * const cs_etm_priv_fmts[] = {
	[CS_ETM_MAGIC]		= " Magic number %llx\n",
	[CS_ETM_CPU]		= " CPU %lld\n",
	[CS_ETM_ETMCR]		= " ETMCR %llx\n",
	[CS_ETM_ETMTRACEIDR]	= " ETMTRACEIDR %llx\n",
	[CS_ETM_ETMCCER]	= " ETMCCER %llx\n",
	[CS_ETM_ETMIDR]		= " ETMIDR %llx\n",
};

static const char * const cs_etmv4_priv_fmts[] = {
	[CS_ETM_MAGIC]		= " Magic number %llx\n",
	[CS_ETM_CPU]		= " CPU %lld\n",
	[CS_ETMV4_TRCCONFIGR]	= " TRCCONFIGR %llx\n",
	[CS_ETMV4_TRCTRACEIDR]	= " TRCTRACEIDR %llx\n",
	[CS_ETMV4_TRCIDR0]	= " TRCIDR0 %llx\n",
	[CS_ETMV4_TRCIDR1]	= " TRCIDR1 %llx\n",
	[CS_ETMV4_TRCIDR2]	= " TRCIDR2 %llx\n",
	[CS_ETMV4_TRCIDR8]	= " TRCIDR8 %llx\n",
	[CS_ETMV4_TRCAUTHSTATUS] = " TRCAUTHSTATUS %llx\n",
};
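
/*
 * Sketch of the AUXTRACE_INFO private data these tables print,
 * reconstructed from the accessors used in this file (see cs-etm.h for
 * the authoritative layout).  All fields are 64-bit words:
 *
 *   [CS_HEADER_VERSION_0]  header version (0)
 *   [CS_PMU_TYPE_CPUS]     PMU type (upper 32 bits) / nr of cpus (lower)
 *   [CS_ETM_SNAPSHOT]      snapshot mode flag
 *   then, per cpu, one block starting with a magic word:
 *     __perf_cs_etmv3_magic: CS_ETM_PRIV_MAX words (ETMCR, ETMTRACEIDR, ...)
 *     __perf_cs_etmv4_magic: CS_ETMV4_PRIV_MAX words (TRCCONFIGR, ...)
 */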

static void cs_etm__print_auxtrace_info(u64 *val, int num)
{
	int i, j, cpu = 0;

	for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
		fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);

	for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
		if (val[i] == __perf_cs_etmv3_magic)
			for (j = 0; j < CS_ETM_PRIV_MAX; j++, i++)
				fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
		else if (val[i] == __perf_cs_etmv4_magic)
			for (j = 0; j < CS_ETMV4_PRIV_MAX; j++, i++)
				fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
		else
			/* failure.. return */
			return;
	}
}

int cs_etm__process_auxtrace_info(union perf_event *event,
				  struct perf_session *session)
{
	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
	struct cs_etm_auxtrace *etm = NULL;
	struct int_node *inode;
	unsigned int pmu_type;
	int event_header_size = sizeof(struct perf_event_header);
	int info_header_size;
	int total_size = auxtrace_info->header.size;
	int priv_size = 0;
	int num_cpu;
	int err = 0, idx = -1;
	int i, j, k;
	u64 *ptr, *hdr = NULL;
	u64 **metadata = NULL;

	/*
	 * sizeof(auxtrace_info_event::type) +
	 * sizeof(auxtrace_info_event::reserved) == 8
	 */
	info_header_size = 8;

	if (total_size < (event_header_size + info_header_size))
		return -EINVAL;

	priv_size = total_size - event_header_size - info_header_size;

	/* First the global part */
	ptr = (u64 *) auxtrace_info->priv;

	/* Look for version '0' of the header */
	if (ptr[0] != 0)
		return -EINVAL;

	hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_0_MAX);
	if (!hdr)
		return -ENOMEM;

	/* Extract header information - see cs-etm.h for format */
	for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
		hdr[i] = ptr[i];
	num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
	pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
				   0xffffffff);

	/*
	 * Create an RB tree for traceID-metadata tuple.  Since the conversion
	 * has to be made for each packet that gets decoded, optimizing access
	 * in anything other than a sequential array is worth doing.
	 */
	traceid_list = intlist__new(NULL);
	if (!traceid_list) {
		err = -ENOMEM;
		goto err_free_hdr;
	}

	metadata = zalloc(sizeof(*metadata) * num_cpu);
	if (!metadata) {
		err = -ENOMEM;
		goto err_free_traceid_list;
	}

	/*
	 * The metadata is stored in the auxtrace_info section and encodes
	 * the configuration of the ARM embedded trace macrocell which is
	 * required by the trace decoder to properly decode the trace due
	 * to its highly compressed nature.
	 */
	for (j = 0; j < num_cpu; j++) {
		if (ptr[i] == __perf_cs_etmv3_magic) {
			metadata[j] = zalloc(sizeof(*metadata[j]) *
					     CS_ETM_PRIV_MAX);
			if (!metadata[j]) {
				err = -ENOMEM;
				goto err_free_metadata;
			}
			for (k = 0; k < CS_ETM_PRIV_MAX; k++)
				metadata[j][k] = ptr[i + k];

			/* The traceID is our handle */
			idx = metadata[j][CS_ETM_ETMTRACEIDR];
			i += CS_ETM_PRIV_MAX;
		} else if (ptr[i] == __perf_cs_etmv4_magic) {
			metadata[j] = zalloc(sizeof(*metadata[j]) *
					     CS_ETMV4_PRIV_MAX);
			if (!metadata[j]) {
				err = -ENOMEM;
				goto err_free_metadata;
			}
			for (k = 0; k < CS_ETMV4_PRIV_MAX; k++)
				metadata[j][k] = ptr[i + k];

			/* The traceID is our handle */
			idx = metadata[j][CS_ETMV4_TRCTRACEIDR];
			i += CS_ETMV4_PRIV_MAX;
		}

		/* Get an RB node for this CPU */
		inode = intlist__findnew(traceid_list, idx);

		/* Something went wrong, no need to continue */
		if (!inode) {
			err = PTR_ERR(inode);
			goto err_free_metadata;
		}

		/*
		 * The node for that CPU should not be taken.
		 * Back out if that's the case.
		 */
		if (inode->priv) {
			err = -EINVAL;
			goto err_free_metadata;
		}
		/* All good, associate the traceID with the metadata pointer */
		inode->priv = metadata[j];
	}

	/*
	 * Each of CS_HEADER_VERSION_0_MAX, CS_ETM_PRIV_MAX and
	 * CS_ETMV4_PRIV_MAX mark how many double words are in the
	 * global metadata, and each cpu's metadata respectively.
	 * The following tests if the correct number of double words was
	 * present in the auxtrace info section.
	 */
	if (i * 8 != priv_size) {
		err = -EINVAL;
		goto err_free_metadata;
	}

	etm = zalloc(sizeof(*etm));

	if (!etm) {
		err = -ENOMEM;
		goto err_free_metadata;
	}

	err = auxtrace_queues__init(&etm->queues);
	if (err)
		goto err_free_etm;

	etm->session = session;
	etm->machine = &session->machines.host;

	etm->num_cpu = num_cpu;
	etm->pmu_type = pmu_type;
	etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
	etm->metadata = metadata;
	etm->auxtrace_type = auxtrace_info->type;
	etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);

	etm->auxtrace.process_event = cs_etm__process_event;
	etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
	etm->auxtrace.flush_events = cs_etm__flush_events;
	etm->auxtrace.free_events = cs_etm__free_events;
	etm->auxtrace.free = cs_etm__free;
	session->auxtrace = &etm->auxtrace;

	etm->unknown_thread = thread__new(999999999, 999999999);
	if (!etm->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * Initialize list node so that at thread__zput() we can avoid
	 * segmentation fault at list_del_init().
	 */
	INIT_LIST_HEAD(&etm->unknown_thread->node);

	err = thread__set_comm(etm->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;

	if (thread__init_map_groups(etm->unknown_thread, etm->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	if (dump_trace) {
		cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
		return 0;
	}

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		etm->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&etm->synth_opts);
		etm->synth_opts.callchain = false;
	}

	err = cs_etm__synth_events(etm, session);
	if (err)
		goto err_delete_thread;

	err = auxtrace_queues__process_index(&etm->queues, session);
	if (err)
		goto err_delete_thread;

	etm->data_queued = etm->queues.populated;

	return 0;

err_delete_thread:
	thread__zput(etm->unknown_thread);
err_free_queues:
	auxtrace_queues__free(&etm->queues);
	session->auxtrace = NULL;
err_free_etm:
	zfree(&etm);
err_free_metadata:
	/* No need to check @metadata[j], free(NULL) is supported */
	for (j = 0; j < num_cpu; j++)
		zfree(&metadata[j]);
	zfree(&metadata);
err_free_traceid_list:
	intlist__delete(traceid_list);
err_free_hdr:
	zfree(&hdr);

	return err;
}