/*
 * BTS PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/coredump.h>
#include <linux/kaiser.h>

#include <asm-generic/sizes.h>
#include <asm/perf_event.h>

#include "../perf_event.h"

struct bts_ctx {
	struct perf_output_handle	handle;
	struct debug_store		ds_back;
	int				state;
};

/* BTS context states: */
enum {
	/* no ongoing AUX transactions */
	BTS_STATE_STOPPED = 0,

	/* AUX transaction is on, BTS tracing is disabled */
	BTS_STATE_INACTIVE,

	/* AUX transaction is on, BTS tracing is running */
	BTS_STATE_ACTIVE,
};

static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);

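/*
 * A BTS record is three u64s (branch-from, branch-to, flags), i.e. 24
 * bytes. BTS_SAFETY_MARGIN (170 records, just under a page) is, as far
 * as this driver is concerned, the headroom kept between the PMI
 * threshold and the end of the buffer, so that records written while
 * the PMI is being delivered do not overrun it.
 */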
#define BTS_RECORD_SIZE		24
#define BTS_SAFETY_MARGIN	4080

struct bts_phys {
	struct page	*page;
	unsigned long	size;
	unsigned long	offset;
	unsigned long	displacement;
};

struct bts_buffer {
	size_t		real_size;	/* multiple of BTS_RECORD_SIZE */
	unsigned int	nr_pages;
	unsigned int	nr_bufs;
	unsigned int	cur_buf;
	bool		snapshot;
	local_t		data_size;
	local_t		lost;
	local_t		head;
	unsigned long	end;
	void		**data_pages;
	struct bts_phys	buf[0];
};

struct pmu bts_pmu;

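/*
 * High-order AUX pages have PG_private set, with the allocation order
 * stored in page_private(); buf_size() turns that into a size in bytes.
 */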
static size_t buf_size(struct page *page)
{
	return 1 << (PAGE_SHIFT + page_private(page));
}

static void bts_buffer_free_aux(void *data)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	struct bts_buffer *buf = data;
	int nbuf;

	for (nbuf = 0; nbuf < buf->nr_bufs; nbuf++) {
		struct page *page = buf->buf[nbuf].page;
		void *kaddr = page_address(page);
		size_t page_size = buf_size(page);

		kaiser_remove_mapping((unsigned long)kaddr, page_size);
	}
#endif
	kfree(data);
}

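/*
 * pmu::setup_aux callback: describe each physically contiguous chunk
 * of the AUX buffer with a bts_phys slot, trimmed to a whole number of
 * BTS records. With page table isolation the chunks also get mapped
 * into the shadow page tables, since the CPU writes BTS records with
 * whatever page tables are current.
 */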
static void *
bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
{
	struct bts_buffer *buf;
	struct page *page;
	int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	unsigned long offset;
	size_t size = nr_pages << PAGE_SHIFT;
	int pg, nbuf, pad;

	/* count all the high order buffers */
	for (pg = 0, nbuf = 0; pg < nr_pages;) {
		page = virt_to_page(pages[pg]);
		if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1))
			return NULL;
		pg += 1 << page_private(page);
		nbuf++;
	}

	/*
	 * to avoid interrupts in overwrite mode, only allow one physical
	 * buffer
	 */
	if (overwrite && nbuf > 1)
		return NULL;

	buf = kzalloc_node(offsetof(struct bts_buffer, buf[nbuf]), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->nr_pages = nr_pages;
	buf->nr_bufs = nbuf;
	buf->snapshot = overwrite;
	buf->data_pages = pages;
	buf->real_size = size - size % BTS_RECORD_SIZE;

	for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) {
		void *kaddr = pages[pg];
		size_t page_size;

		page = virt_to_page(kaddr);
		page_size = buf_size(page);

		if (kaiser_add_mapping((unsigned long)kaddr,
				       page_size, __PAGE_KERNEL) < 0) {
			/* free only the mappings added so far */
			buf->nr_bufs = nbuf;
			bts_buffer_free_aux(buf);
			return NULL;
		}

		buf->buf[nbuf].page = page;
		buf->buf[nbuf].offset = offset;
		buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
		buf->buf[nbuf].size = page_size - buf->buf[nbuf].displacement;
		pad = buf->buf[nbuf].size % BTS_RECORD_SIZE;
		buf->buf[nbuf].size -= pad;

		pg += page_size >> PAGE_SHIFT;
		offset += page_size;
	}

	return buf;
}

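/* Offset of the first usable byte of a chunk within the AUX buffer. */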
static unsigned long bts_buffer_offset(struct bts_buffer *buf, unsigned int idx)
{
	return buf->buf[idx].offset + buf->buf[idx].displacement;
}

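/*
 * Program the per-CPU debug store (DS) so that the CPU writes BTS
 * records into the current chunk. In non-snapshot mode the interrupt
 * threshold is placed BTS_SAFETY_MARGIN before the end; in snapshot
 * mode it is placed past the absolute maximum, so no PMI is raised.
 */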
static void
bts_config_buffer(struct bts_buffer *buf)
{
	int cpu = raw_smp_processor_id();
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	struct bts_phys *phys = &buf->buf[buf->cur_buf];
	unsigned long index, thresh = 0, end = phys->size;
	struct page *page = phys->page;

	index = local_read(&buf->head);

	if (!buf->snapshot) {
		if (buf->end < phys->offset + buf_size(page))
			end = buf->end - phys->offset - phys->displacement;

		index -= phys->offset + phys->displacement;

		if (end - index > BTS_SAFETY_MARGIN)
			thresh = end - BTS_SAFETY_MARGIN;
		else if (end - index > BTS_RECORD_SIZE)
			thresh = end - BTS_RECORD_SIZE;
		else
			thresh = end;
	}

	ds->bts_buffer_base = (u64)(long)page_address(page) + phys->displacement;
	ds->bts_index = ds->bts_buffer_base + index;
	ds->bts_absolute_maximum = ds->bts_buffer_base + end;
	ds->bts_interrupt_threshold = !buf->snapshot
					? ds->bts_buffer_base + thresh
					: ds->bts_absolute_maximum + BTS_RECORD_SIZE;
}

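/* Zero-fill the unused tail of the current chunk. */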
static void bts_buffer_pad_out(struct bts_phys *phys, unsigned long head)
{
	unsigned long index = head - phys->offset;

	memset(page_address(phys->page) + index, 0, phys->size - index);
}

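/*
 * Fold the hardware write pointer (ds->bts_index) back into the
 * software state: advance buf->head and account the newly written
 * bytes in buf->data_size, or, in snapshot mode, publish the absolute
 * head.
 */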
static void bts_update(struct bts_ctx *bts)
{
	int cpu = raw_smp_processor_id();
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	struct bts_buffer *buf = perf_get_aux(&bts->handle);
	unsigned long index = ds->bts_index - ds->bts_buffer_base, old, head;

	if (!buf)
		return;

	head = index + bts_buffer_offset(buf, buf->cur_buf);
	old = local_xchg(&buf->head, head);

	if (!buf->snapshot) {
		if (old == head)
			return;

		if (ds->bts_index >= ds->bts_absolute_maximum)
			local_inc(&buf->lost);

		/*
		 * old and head are always in the same physical buffer, so we
		 * can subtract them to get the data size.
		 */
		local_add(head - old, &buf->data_size);
	} else {
		local_set(&buf->data_size, head);
	}
}

static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);

/*
 * Ordering PMU callbacks wrt themselves and the PMI is done by means
 * of bts::state, which:
 *  - is set when bts::handle::event is valid, that is, between
 *    perf_aux_output_begin() and perf_aux_output_end();
 *  - is zero otherwise;
 *  - is ordered against bts::handle::event with a compiler barrier.
 */

static void __bts_event_start(struct perf_event *event)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf = perf_get_aux(&bts->handle);
	u64 config = 0;

	if (!buf->snapshot)
		config |= ARCH_PERFMON_EVENTSEL_INT;
	if (!event->attr.exclude_kernel)
		config |= ARCH_PERFMON_EVENTSEL_OS;
	if (!event->attr.exclude_user)
		config |= ARCH_PERFMON_EVENTSEL_USR;

	bts_config_buffer(buf);

	/*
	 * local barrier to make sure that ds configuration made it
	 * before we enable BTS and bts::state goes ACTIVE
	 */
	wmb();

	/* INACTIVE/STOPPED -> ACTIVE */
	WRITE_ONCE(bts->state, BTS_STATE_ACTIVE);

	intel_pmu_enable_bts(config);
}

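/*
 * pmu::start callback: open an AUX transaction, size the writable
 * region, save the DS fields we are about to clobber, and arm BTS.
 */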
static void bts_event_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf;

	buf = perf_aux_output_begin(&bts->handle, event);
	if (!buf)
		goto fail_stop;

	if (bts_buffer_reset(buf, &bts->handle))
		goto fail_end_stop;

	bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;
	bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;
	bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;

	event->hw.itrace_started = 1;
	event->hw.state = 0;

	__bts_event_start(event);

	return;

fail_end_stop:
	perf_aux_output_end(&bts->handle, 0, false);

fail_stop:
	event->hw.state = PERF_HES_STOPPED;
}

static void __bts_event_stop(struct perf_event *event, int state)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	/* ACTIVE -> INACTIVE(PMI)/STOPPED(->stop()) */
	WRITE_ONCE(bts->state, state);

	/*
	 * No extra synchronization is mandated by the documentation to have
	 * BTS data stores globally visible.
	 */
	intel_pmu_disable_bts();
}

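/*
 * pmu::stop callback: disarm BTS; with PERF_EF_UPDATE also flush the
 * collected data into the AUX transaction and restore the saved DS
 * fields.
 */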
static void bts_event_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf = NULL;
	int state = READ_ONCE(bts->state);

	if (state == BTS_STATE_ACTIVE)
		__bts_event_stop(event, BTS_STATE_STOPPED);

	if (state != BTS_STATE_STOPPED)
		buf = perf_get_aux(&bts->handle);

	event->hw.state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_UPDATE) {
		bts_update(bts);

		if (buf) {
			if (buf->snapshot)
				bts->handle.head =
					local_xchg(&buf->data_size,
						   buf->nr_pages << PAGE_SHIFT);
			perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
					    !!local_xchg(&buf->lost, 0));
		}

		cpuc->ds->bts_index = bts->ds_back.bts_buffer_base;
		cpuc->ds->bts_buffer_base = bts->ds_back.bts_buffer_base;
		cpuc->ds->bts_absolute_maximum = bts->ds_back.bts_absolute_maximum;
		cpuc->ds->bts_interrupt_threshold = bts->ds_back.bts_interrupt_threshold;
	}
}

void intel_bts_enable_local(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	int state = READ_ONCE(bts->state);

	/*
	 * Here we transition from INACTIVE to ACTIVE;
	 * if we instead are STOPPED from the interrupt handler,
	 * stay that way. Can't be ACTIVE here though.
	 */
	if (WARN_ON_ONCE(state == BTS_STATE_ACTIVE))
		return;

	if (state == BTS_STATE_STOPPED)
		return;

	if (bts->handle.event)
		__bts_event_start(bts->handle.event);
}

void intel_bts_disable_local(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	/*
	 * Here we transition from ACTIVE to INACTIVE;
	 * do nothing for STOPPED or INACTIVE.
	 */
	if (READ_ONCE(bts->state) != BTS_STATE_ACTIVE)
		return;

	if (bts->handle.event)
		__bts_event_stop(bts->handle.event, BTS_STATE_INACTIVE);
}

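/*
 * Compute the writable region for the next transaction: stay in the
 * current chunk if it has room, otherwise pad it out and advance to
 * the next one. Returns -ENOSPC when no record-sized space is left.
 */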
static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
{
	unsigned long head, space, next_space, pad, gap, skip, wakeup;
	unsigned int next_buf;
	struct bts_phys *phys, *next_phys;
	int ret;

	if (buf->snapshot)
		return 0;

	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	phys = &buf->buf[buf->cur_buf];
	space = phys->offset + phys->displacement + phys->size - head;
	pad = space;
	if (space > handle->size) {
		space = handle->size;
		space -= space % BTS_RECORD_SIZE;
	}
	if (space <= BTS_SAFETY_MARGIN) {
		/* See if next phys buffer has more space */
		next_buf = buf->cur_buf + 1;
		if (next_buf >= buf->nr_bufs)
			next_buf = 0;
		next_phys = &buf->buf[next_buf];
		gap = buf_size(phys->page) - phys->displacement - phys->size +
		      next_phys->displacement;
		skip = pad + gap;
		if (handle->size >= skip) {
			next_space = next_phys->size;
			if (next_space + skip > handle->size) {
				next_space = handle->size - skip;
				next_space -= next_space % BTS_RECORD_SIZE;
			}
			if (next_space > space || !space) {
				if (pad)
					bts_buffer_pad_out(phys, head);
				ret = perf_aux_output_skip(handle, skip);
				if (ret)
					return ret;
				/* Advance to next phys buffer */
				phys = next_phys;
				space = next_space;
				head = phys->offset + phys->displacement;
				/*
				 * After this, cur_buf and head won't match ds
				 * anymore, so we must not be racing with
				 * bts_update().
				 */
				buf->cur_buf = next_buf;
				local_set(&buf->head, head);
			}
		}
	}

	/* Don't go far beyond wakeup watermark */
	wakeup = BTS_SAFETY_MARGIN + BTS_RECORD_SIZE + handle->wakeup -
		 handle->head;
	if (space > wakeup) {
		space = wakeup;
		space -= space % BTS_RECORD_SIZE;
	}

	buf->end = head + space;

	/*
	 * If we have no space, the lost notification would have been sent when
	 * we hit absolute_maximum - see bts_update()
	 */
	if (!space)
		return -ENOSPC;

	return 0;
}

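/*
 * PMI handler, called from the x86 PMU NMI path. Returns nonzero if
 * the NMI was ours; each PMI closes the current AUX transaction and
 * opens a new one.
 */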
int intel_bts_interrupt(void)
{
	struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct perf_event *event = bts->handle.event;
	struct bts_buffer *buf;
	s64 old_head;
	int err = -ENOSPC, handled = 0;

	/*
	 * The only surefire way of knowing if this NMI is ours is by checking
	 * the write ptr against the PMI threshold.
	 */
	if (ds && (ds->bts_index >= ds->bts_interrupt_threshold))
		handled = 1;

	/*
	 * this is wrapped in intel_bts_enable_local/intel_bts_disable_local,
	 * so we can only be INACTIVE or STOPPED
	 */
	if (READ_ONCE(bts->state) == BTS_STATE_STOPPED)
		return handled;

	buf = perf_get_aux(&bts->handle);
	if (!buf)
		return handled;

	/*
	 * Skip snapshot counters: they don't use the interrupt, but
	 * there's no other way of telling, because the pointer will
	 * keep moving
	 */
	if (buf->snapshot)
		return 0;

	old_head = local_read(&buf->head);
	bts_update(bts);

	/* no new data */
	if (old_head == local_read(&buf->head))
		return handled;

	perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
			    !!local_xchg(&buf->lost, 0));

	buf = perf_aux_output_begin(&bts->handle, event);
	if (buf)
		err = bts_buffer_reset(buf, &bts->handle);

	if (err) {
		WRITE_ONCE(bts->state, BTS_STATE_STOPPED);

		if (buf) {
			/*
			 * BTS_STATE_STOPPED should be visible before
			 * cleared handle::event
			 */
			barrier();
			perf_aux_output_end(&bts->handle, 0, false);
		}
	}

	return 1;
}

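/* pmu::del is stop with PERF_EF_UPDATE: flush whatever was collected. */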
static void bts_event_del(struct perf_event *event, int mode)
{
	bts_event_stop(event, PERF_EF_UPDATE);
}

static int bts_event_add(struct perf_event *event, int mode)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	event->hw.state = PERF_HES_STOPPED;

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		return -EBUSY;

	if (bts->handle.event)
		return -EBUSY;

	if (mode & PERF_EF_START) {
		bts_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED)
			return -EINVAL;
	}

	return 0;
}

static void bts_event_destroy(struct perf_event *event)
{
	x86_release_hardware();
	x86_del_exclusive(x86_lbr_exclusive_bts);
}

static int bts_event_init(struct perf_event *event)
{
	int ret;

	if (event->attr.type != bts_pmu.type)
		return -ENOENT;

	if (x86_add_exclusive(x86_lbr_exclusive_bts))
		return -EBUSY;

	/*
	 * BTS leaks kernel addresses even when CPL0 tracing is
	 * disabled, so disallow intel_bts driver for unprivileged
	 * users on paranoid systems since it provides trace data
	 * to the user in a zero-copy fashion.
	 *
	 * Note that the default paranoia setting permits unprivileged
	 * users to profile the kernel.
	 */
	if (event->attr.exclude_kernel && perf_paranoid_kernel() &&
	    !capable(CAP_SYS_ADMIN))
		return -EACCES;

	ret = x86_reserve_hardware();
	if (ret) {
		x86_del_exclusive(x86_lbr_exclusive_bts);
		return ret;
	}

	event->destroy = bts_event_destroy;

	return 0;
}

static void bts_event_read(struct perf_event *event)
{
}

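/*
 * BTS needs a 64-bit DS area (DTES64). The PMU registers as
 * "intel_bts"; from userspace it is driven through the AUX buffer
 * interface, e.g. with `perf record -e intel_bts// ...`.
 */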
static __init int bts_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
		return -ENODEV;

	bts_pmu.capabilities	= PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
				  PERF_PMU_CAP_EXCLUSIVE;
	bts_pmu.task_ctx_nr	= perf_sw_context;
	bts_pmu.event_init	= bts_event_init;
	bts_pmu.add		= bts_event_add;
	bts_pmu.del		= bts_event_del;
	bts_pmu.start		= bts_event_start;
	bts_pmu.stop		= bts_event_stop;
	bts_pmu.read		= bts_event_read;
	bts_pmu.setup_aux	= bts_buffer_setup_aux;
	bts_pmu.free_aux	= bts_buffer_free_aux;

	return perf_pmu_register(&bts_pmu, "intel_bts", -1);
}
arch_initcall(bts_init);