// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>
#include <linux/nospec.h>

#include "internal.h"
static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, EPOLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}
/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();

	/*
	 * Avoid an explicit LOAD/STORE such that architectures with memops
	 * can use them.
	 */
	(*(volatile unsigned int *)&rb->nest)++;
	handle->wakeup = local_read(&rb->wakeup);
}
static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;
	unsigned int nest;

	/*
	 * If this isn't the outermost nesting, we don't have to update
	 * @rb->user_page->data_head.
	 */
	nest = READ_ONCE(rb->nest);
	if (nest > 1) {
		WRITE_ONCE(rb->nest, nest - 1);
		goto out;
	}

again:
	/*
	 * In order to avoid publishing a head value that goes backwards,
	 * we must ensure the load of @rb->head happens after we've
	 * incremented @rb->nest.
	 *
	 * Otherwise we can observe a @rb->head value before one published
	 * by an IRQ/NMI happening between the load and the increment.
	 */
	barrier();
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here and advance @rb->head, causing our
	 * load above to be stale.
	 */

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load of
	 * the ->data_tail and the stores of $data. In case ->data_tail
	 * indicates there is no room in the buffer to store $data we do not.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	WRITE_ONCE(rb->user_page->data_head, head);

	/*
	 * We must publish the head before decrementing the nest count,
	 * otherwise an IRQ/NMI can publish a more recent head value and our
	 * write will (temporarily) publish a stale value.
	 */
	barrier();
	WRITE_ONCE(rb->nest, 0);

	/*
	 * Ensure we decrement @rb->nest before we validate the @rb->head.
	 * Otherwise we cannot be sure we caught the 'last' nested update.
	 */
	barrier();
	if (unlikely(head != local_read(&rb->head))) {
		WRITE_ONCE(rb->nest, 1);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
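
/*
 * For reference, an illustrative sketch (userspace side, not kernel code) of
 * the consumer loop implied by the barrier diagram above; the names used here
 * are made up for the example:
 *
 *	u64 head = READ_ONCE(up->data_head);
 *	smp_rmb();				C, matches B
 *	... copy records in [data_tail, head) out of the data pages ...
 *	smp_mb();				D, matches A
 *	WRITE_ONCE(up->data_tail, head);
 */
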
static __always_inline bool
ring_buffer_has_space(unsigned long head, unsigned long tail,
		      unsigned long data_size, unsigned int size,
		      bool backward)
{
	if (!backward)
		return CIRC_SPACE(head, tail, data_size) >= size;
	else
		return CIRC_SPACE(tail, head, data_size) >= size;
}
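
/*
 * Reserve @size bytes in @event's ring buffer and set @handle up for the
 * subsequent perf_output_put()/perf_output_copy() calls. Returns 0 on
 * success and -ENOSPC otherwise; failures are accounted in @rb->lost and
 * @event->lost_samples.
 */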
static __always_inline int
__perf_output_begin(struct perf_output_handle *handle,
		    struct perf_event *event, unsigned int size,
		    bool backward)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(rb->paused)) {
		if (rb->nr_pages) {
			local_inc(&rb->lost);
			atomic64_inc(&event->lost_samples);
		}
		goto out;
	}

	handle->rb    = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = READ_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite) {
			if (unlikely(!ring_buffer_has_space(head, tail,
							    perf_data_size(rb),
							    size, backward)))
				goto fail;
		}

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below. Since the @tail
		 * load is required to compute the branch to fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */

		if (!backward)
			head += size;
		else
			head -= size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	if (backward) {
		offset = head;
		head = (u64)(-head);
	}

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	atomic64_inc(&event->lost_samples);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
int perf_output_begin_forward(struct perf_output_handle *handle,
			      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, false);
}

int perf_output_begin_backward(struct perf_output_handle *handle,
			       struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, true);
}

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size,
				   unlikely(is_write_backward(event)));
}
unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
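
/*
 * Typical use of the output path above (illustrative only):
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;		(nothing written; counted as lost)
 *	perf_output_put(&handle, header);
 *	...			(perf_output_copy()/perf_output_skip())
 *	perf_output_end(&handle);
 */
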
static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	refcount_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);

	/*
	 * perf_output_begin() only checks rb->paused, therefore
	 * rb->paused must be true if we have no pages for output.
	 */
	if (!rb->nr_pages)
		rb->paused = 1;
}
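
/*
 * Record additional flags (e.g. PERF_AUX_FLAG_TRUNCATED) on the in-flight
 * AUX handle; they are reported in the PERF_RECORD_AUX record emitted by
 * perf_aux_output_end().
 */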
void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
{
	/*
	 * OVERWRITE is determined by perf_aux_output_end() and can't
	 * be passed in directly.
	 */
	if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
		return;

	handle->aux_flags |= flags;
}
EXPORT_SYMBOL_GPL(perf_aux_output_flag);
/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 *
 * Call this from pmu::start(); see the comment in perf_aux_output_end()
 * about its use in pmu callbacks. Both can also be called from the PMI
 * handler if needed.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct ring_buffer *rb;
	unsigned int nest;

	if (output_event->parent)
		output_event = output_event->parent;

	/*
	 * Since this will typically be open across pmu::add/pmu::del, we
	 * grab ring_buffer's refcount instead of holding rcu read lock
	 * to make sure it doesn't disappear under us.
	 */
	rb = ring_buffer_get(output_event);
	if (!rb)
		return NULL;

	if (!rb_has_aux(rb))
		goto err;

	/*
	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
	 * about to get freed, so we leave immediately.
	 *
	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
	 * the same order, see perf_mmap_close. Otherwise we end up freeing
	 * aux pages in this path, which is a bug, because in_atomic().
	 */
	if (!atomic_read(&rb->aux_mmap_count))
		goto err;

	if (!refcount_inc_not_zero(&rb->aux_refcount))
		goto err;

	nest = READ_ONCE(rb->aux_nest);
	/*
	 * Nesting is not supported for AUX area, make sure nested
	 * writers are caught early
	 */
	if (WARN_ON_ONCE(nest))
		goto err_put;

	WRITE_ONCE(rb->aux_nest, nest + 1);

	aux_head = rb->aux_head;

	handle->rb = rb;
	handle->event = event;
	handle->head = aux_head;
	handle->size = 0;
	handle->aux_flags = 0;

	/*
	 * In overwrite mode, AUX data stores do not depend on aux_tail,
	 * therefore (A) control dependency barrier does not exist. The
	 * (B) <-> (C) ordering is still observed by the pmu driver.
	 */
	if (!rb->aux_overwrite) {
		aux_tail = READ_ONCE(rb->user_page->aux_tail);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
		if (aux_head - aux_tail < perf_aux_size(rb))
			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

		/*
		 * handle->size computation depends on aux_tail load; this forms a
		 * control dependency barrier separating aux_tail load from aux data
		 * store that will be enabled on successful return
		 */
		if (!handle->size) { /* A, matches D */
			event->pending_disable = smp_processor_id();
			perf_output_wakeup(handle);
			WRITE_ONCE(rb->aux_nest, 0);
			goto err_put;
		}
	}

	return handle->rb->aux_priv;

err_put:
	/* can't be last */
	rb_free_aux(rb);

err:
	ring_buffer_put(rb);
	handle->event = NULL;

	return NULL;
}
EXPORT_SYMBOL_GPL(perf_aux_output_begin);
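
/*
 * Returns true when enough new AUX data has accumulated since the last
 * wakeup to cross @rb->aux_watermark, and advances @rb->aux_wakeup to the
 * latest watermark boundary; always false in overwrite mode.
 */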
static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
{
	if (rb->aux_overwrite)
		return false;

	if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
		rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
		return true;
	}

	return false;
}
/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 *
 * Note: this has to be called from pmu::stop() callback, as the assumption
 * of the AUX buffer management code is that after pmu::stop(), the AUX
 * transaction must be stopped and therefore drop the AUX reference count.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{
	bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
	struct ring_buffer *rb = handle->rb;
	unsigned long aux_head;

	/* in overwrite mode, driver provides aux_head via handle */
	if (rb->aux_overwrite) {
		handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;

		aux_head = handle->head;
		rb->aux_head = aux_head;
	} else {
		handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;

		aux_head = rb->aux_head;
		rb->aux_head += size;
	}

	/*
	 * Only send RECORD_AUX if we have something useful to communicate
	 *
	 * Note: the OVERWRITE records by themselves are not considered
	 * useful, as they don't communicate any *new* information,
	 * aside from the short-lived offset, that becomes history at
	 * the next event sched-in and therefore isn't useful.
	 * The userspace that needs to copy out AUX data in overwrite
	 * mode should know to use user_page::aux_head for the actual
	 * offset. So, from now on we don't output AUX records that
	 * have *only* OVERWRITE flag set.
	 */
	if (size || (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE))
		perf_event_aux_event(handle->event, aux_head, size,
				     handle->aux_flags);

	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
	if (rb_need_aux_wakeup(rb))
		wakeup = true;

	if (wakeup) {
		if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
			handle->event->pending_disable = smp_processor_id();
		perf_output_wakeup(handle);
	}

	handle->event = NULL;

	WRITE_ONCE(rb->aux_nest, 0);
	/* can't be last */
	rb_free_aux(rb);
	ring_buffer_put(rb);
}
EXPORT_SYMBOL_GPL(perf_aux_output_end);
/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
	struct ring_buffer *rb = handle->rb;

	if (size > handle->size)
		return -ENOSPC;

	rb->aux_head += size;

	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
	if (rb_need_aux_wakeup(rb)) {
		perf_output_wakeup(handle);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
	}

	handle->head = rb->aux_head;
	handle->size -= size;

	return 0;
}
EXPORT_SYMBOL_GPL(perf_aux_output_skip);
void *perf_get_aux(struct perf_output_handle *handle)
{
	/* this is only valid between perf_aux_output_begin and *_end */
	if (!handle->event)
		return NULL;

	return handle->rb->aux_priv;
}
EXPORT_SYMBOL_GPL(perf_get_aux);
#define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
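
/*
 * Allocate a (possibly high-order) page for the AUX area, falling back to
 * lower orders on failure. A successful high-order allocation is split into
 * single pages, with the order recorded in the first page's ->private so
 * the PMU driver can tell how large the contiguous chunk is.
 */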
static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	if (order > MAX_ORDER)
		order = MAX_ORDER;

	do {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
	} while (!page && order--);

	if (page && order) {
		/*
		 * Communicate the allocation size to the driver:
		 * if we managed to secure a high-order allocation,
		 * set its first page's private to this order;
		 * !PagePrivate(page) means it's just a normal page.
		 */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}
static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
	struct page *page = virt_to_page(rb->aux_pages[idx]);

	ClearPagePrivate(page);
	page->mapping = NULL;
	__free_page(page);
}
static void __rb_free_aux(struct ring_buffer *rb)
{
	int pg;

	/*
	 * Should never happen, the last reference should be dropped from
	 * perf_mmap_close() path, which first stops aux transactions (which
	 * in turn are the atomic holders of aux_refcount) and then does the
	 * last rb_free_aux().
	 */
	WARN_ON_ONCE(in_atomic());

	if (rb->aux_priv) {
		rb->free_aux(rb->aux_priv);
		rb->free_aux = NULL;
		rb->aux_priv = NULL;
	}

	if (rb->aux_nr_pages) {
		for (pg = 0; pg < rb->aux_nr_pages; pg++)
			rb_free_aux_page(rb, pg);

		kfree(rb->aux_pages);
		rb->aux_nr_pages = 0;
	}
}
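
/*
 * Allocate the AUX area for @event: an array of pages (grouped into
 * contiguous high-order chunks where possible), hand it to the PMU via
 * pmu::setup_aux() and take the initial aux_refcount.
 */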
int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
	int ret = -ENOMEM, max_order;

	if (!has_aux(event))
		return -EOPNOTSUPP;

	/*
	 * We need to start with the max_order that fits in nr_pages,
	 * not the other way around, hence ilog2() and not get_order.
	 */
	max_order = ilog2(nr_pages);

	/*
	 * PMU requests more than one contiguous chunks of memory
	 * for SW double buffering
	 */
	if (!overwrite) {
		if (!max_order)
			return -EINVAL;

		max_order--;
	}

	/*
	 * kcalloc_node() is unable to allocate buffer if the size is larger
	 * than: PAGE_SIZE << MAX_ORDER; directly bail out in this case.
	 */
	if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_ORDER)
		return -ENOMEM;

	rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
				     node);
	if (!rb->aux_pages)
		return -ENOMEM;

	rb->free_aux = event->pmu->free_aux;
	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
		struct page *page;
		int last, order;

		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
		page = rb_alloc_aux_page(node, order);
		if (!page)
			goto out;

		for (last = rb->aux_nr_pages + (1 << page_private(page));
		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
	}

	/*
	 * In overwrite mode, PMUs that don't support SG may not handle more
	 * than one contiguous allocation, since they rely on PMI to do double
	 * buffering. In this case, the entire buffer has to be one contiguous
	 * chunk.
	 */
	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
	    overwrite) {
		struct page *page = virt_to_page(rb->aux_pages[0]);

		if (page_private(page) != max_order)
			goto out;
	}

	rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
					     overwrite);
	if (!rb->aux_priv)
		goto out;

	ret = 0;

	/*
	 * aux_pages (and pmu driver's private data, aux_priv) will be
	 * referenced in both producer's and consumer's contexts, thus
	 * we keep a refcount here to make sure either of the two can
	 * reference them safely.
	 */
	refcount_set(&rb->aux_refcount, 1);

	rb->aux_overwrite = overwrite;
	rb->aux_watermark = watermark;

	if (!rb->aux_watermark && !rb->aux_overwrite)
		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
	if (!ret)
		rb->aux_pgoff = pgoff;
	else
		__rb_free_aux(rb);

	return ret;
}
void rb_free_aux(struct ring_buffer *rb)
{
	if (refcount_dec_and_test(&rb->aux_refcount))
		__rb_free_aux(rb);
}
#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}
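
/*
 * Allocate one zeroed page, preferably on the node of @cpu (any node when
 * @cpu == -1), and return its kernel virtual address.
 */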
static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
		goto fail;

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}
static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}
void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}
static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}
static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}
static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}
void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	if (nr_pages) {
		rb->nr_pages = 1;
		rb->page_order = ilog2(nr_pages);
	}

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif
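
/*
 * Translate an mmap() page offset into the backing struct page: offsets
 * inside the AUX region resolve to aux_pages[], everything else is handled
 * by the backend-specific __perf_mmap_to_page() above.
 */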
struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (rb->aux_nr_pages) {
		/* above AUX space */
		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
			return NULL;

		/* AUX space */
		if (pgoff >= rb->aux_pgoff) {
			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
			return virt_to_page(rb->aux_pages[aux_pgoff]);
		}
	}

	return __perf_mmap_to_page(rb, pgoff);
}