// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>
#include <linux/nospec.h>

#include "internal.h"
static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, EPOLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending_irq);
}
/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct perf_buffer *rb = handle->rb;

	preempt_disable();

	/*
	 * Avoid an explicit LOAD/STORE such that architectures with memops
	 * can use them.
	 */
	(*(volatile unsigned int *)&rb->nest)++;
	handle->wakeup = local_read(&rb->wakeup);
}
static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct perf_buffer *rb = handle->rb;
	unsigned long head;
	unsigned int nest;

	/*
	 * If this isn't the outermost nesting, we don't have to update
	 * @rb->user_page->data_head.
	 */
	nest = READ_ONCE(rb->nest);
	if (nest > 1) {
		WRITE_ONCE(rb->nest, nest - 1);
		goto out;
	}

again:
	/*
	 * In order to avoid publishing a head value that goes backwards,
	 * we must ensure the load of @rb->head happens after we've
	 * incremented @rb->nest.
	 *
	 * Otherwise we can observe a @rb->head value before one published
	 * by an IRQ/NMI happening between the load and the increment.
	 */
	barrier();
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here and advance @rb->head, causing our
	 * load above to be stale.
	 */

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load of
	 * the ->data_tail and the stores of $data. In case ->data_tail
	 * indicates there is no room in the buffer to store $data we do not.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	WRITE_ONCE(rb->user_page->data_head, head);

	/*
	 * We must publish the head before decrementing the nest count,
	 * otherwise an IRQ/NMI can publish a more recent head value and our
	 * write will (temporarily) publish a stale value.
	 */
	barrier();
	WRITE_ONCE(rb->nest, 0);

	/*
	 * Ensure we decrement @rb->nest before we validate the @rb->head.
	 * Otherwise we cannot be sure we caught the 'last' nested update.
	 */
	barrier();
	if (unlikely(head != local_read(&rb->head))) {
		WRITE_ONCE(rb->nest, 1);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
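
/*
 * A minimal sketch of the matching userspace consumer loop, for
 * illustration only: mmap_base/data_base and the read_sample() helper
 * (assumed to return the record size) are hypothetical. It issues (C)
 * and (D) from the diagram above:
 *
 *	struct perf_event_mmap_page *up = mmap_base;
 *	__u64 head = READ_ONCE(up->data_head);
 *	__u64 tail = up->data_tail;
 *
 *	smp_rmb();					// C, matches B
 *	while (tail != head)
 *		tail += read_sample(data_base, tail);	// LOAD $data
 *	smp_mb();					// D, matches A
 *	WRITE_ONCE(up->data_tail, tail);
 */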
static __always_inline bool
ring_buffer_has_space(unsigned long head, unsigned long tail,
		      unsigned long data_size, unsigned int size,
		      bool backward)
{
	if (!backward)
		return CIRC_SPACE(head, tail, data_size) >= size;
	else
		return CIRC_SPACE(tail, head, data_size) >= size;
}
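
/*
 * A worked example of the forward case (illustrative numbers only):
 * with data_size = 4096, head = 4000 and tail = 100,
 *
 *	CIRC_SPACE(4000, 100, 4096) = (100 - 4001) & 4095 = 195
 *
 * i.e. the head may advance to one byte short of the tail, so a record
 * of size <= 195 fits and anything larger is dropped; backward writers
 * use the mirrored computation.
 */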
static __always_inline int
__perf_output_begin(struct perf_output_handle *handle,
		    struct perf_sample_data *data,
		    struct perf_event *event, unsigned int size,
		    bool backward)
{
	struct perf_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(rb->paused)) {
		if (rb->nr_pages) {
			local_inc(&rb->lost);
			atomic64_inc(&event->lost_samples);
		}
		goto out;
	}

	handle->rb    = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	offset = local_read(&rb->head);
	do {
		head = offset;
		tail = READ_ONCE(rb->user_page->data_tail);
		if (!rb->overwrite) {
			if (unlikely(!ring_buffer_has_space(head, tail,
							    perf_data_size(rb),
							    size, backward)))
				goto fail;
		}

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below. Since the @tail
		 * load is required to compute the branch to fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * (LOAD -> SMP-MB -> STORE).
		 *
		 * See perf_output_put_handle().
		 */

		if (!backward)
			head += size;
		else
			head -= size;
	} while (!local_try_cmpxchg(&rb->head, &offset, head));

	if (backward) {
		offset = head;
		head = (u64)(-head);
	}

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		/* XXX mostly redundant; @data is already fully initialized */
		perf_event_header__init_id(&lost_event.header, data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	atomic64_inc(&event->lost_samples);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
int perf_output_begin_forward(struct perf_output_handle *handle,
			      struct perf_sample_data *data,
			      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, data, event, size, false);
}

int perf_output_begin_backward(struct perf_output_handle *handle,
			       struct perf_sample_data *data,
			       struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, data, event, size, true);
}

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_sample_data *data,
		      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, data, event, size,
				   unlikely(is_write_backward(event)));
}
unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
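
/*
 * Typical producer-side usage, sketched for illustration (the real
 * callers live in kernel/events/core.c, e.g. perf_event_output();
 * @header and @payload here are placeholders):
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, data, event, header.size))
 *		return;				// no room; counted in ->lost
 *	perf_output_put(&handle, header);	// fixed-size fields
 *	perf_output_copy(&handle, payload, payload_len);
 *	perf_output_end(&handle);		// publish data_head, maybe wake
 */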
static void
ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	refcount_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);

	/*
	 * perf_output_begin() only checks rb->paused, therefore
	 * rb->paused must be true if we have no pages for output.
	 */
	if (!rb->nr_pages)
		rb->paused = 1;
}
void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
{
	/*
	 * OVERWRITE is determined by perf_aux_output_end() and can't
	 * be passed in directly.
	 */
	if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
		return;

	handle->aux_flags |= flags;
}
EXPORT_SYMBOL_GPL(perf_aux_output_flag);
/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 *
 * Call this from pmu::start(); see the comment in perf_aux_output_end()
 * about its use in pmu callbacks. Both can also be called from the PMI
 * handler if needed.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct perf_buffer *rb;
	unsigned int nest;

	if (output_event->parent)
		output_event = output_event->parent;

	/*
	 * Since this will typically be open across pmu::add/pmu::del, we
	 * grab ring_buffer's refcount instead of holding rcu read lock
	 * to make sure it doesn't disappear under us.
	 */
	rb = ring_buffer_get(output_event);
	if (!rb)
		return NULL;

	if (!rb_has_aux(rb))
		goto err;

	/*
	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
	 * about to get freed, so we leave immediately.
	 *
	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
	 * the same order, see perf_mmap_close. Otherwise we end up freeing
	 * aux pages in this path, which is a bug, because in_atomic().
	 */
	if (!atomic_read(&rb->aux_mmap_count))
		goto err;

	if (!refcount_inc_not_zero(&rb->aux_refcount))
		goto err;

	nest = READ_ONCE(rb->aux_nest);
	/*
	 * Nesting is not supported for AUX area, make sure nested
	 * writers are caught early
	 */
	if (WARN_ON_ONCE(nest))
		goto err_put;

	WRITE_ONCE(rb->aux_nest, nest + 1);

	aux_head = rb->aux_head;

	handle->rb = rb;
	handle->event = event;
	handle->head = aux_head;
	handle->size = 0;
	handle->aux_flags = 0;

	/*
	 * In overwrite mode, AUX data stores do not depend on aux_tail,
	 * therefore (A) control dependency barrier does not exist. The
	 * (B) <-> (C) ordering is still observed by the pmu driver.
	 */
	if (!rb->aux_overwrite) {
		aux_tail = READ_ONCE(rb->user_page->aux_tail);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
		if (aux_head - aux_tail < perf_aux_size(rb))
			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

		/*
		 * handle->size computation depends on aux_tail load; this forms a
		 * control dependency barrier separating aux_tail load from aux data
		 * store that will be enabled on successful return
		 */
		if (!handle->size) { /* A, matches D */
			event->pending_disable = smp_processor_id();
			perf_output_wakeup(handle);
			WRITE_ONCE(rb->aux_nest, 0);
			goto err_put;
		}
	}

	return handle->rb->aux_priv;

err_put:
	/* can't be last */
	rb_free_aux(rb);

err:
	ring_buffer_put(rb);
	handle->event = NULL;

	return NULL;
}
EXPORT_SYMBOL_GPL(perf_aux_output_begin);
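
/*
 * A sketch of how a PMU driver would drive an AUX transaction, for
 * illustration only: my_pmu_*(), pmu_handle and hw_{start,stop}_trace()
 * are hypothetical (real users include the Intel PT and ARM CoreSight
 * drivers):
 *
 *	static void my_pmu_start(struct perf_event *event, int flags)
 *	{
 *		void *buf = perf_aux_output_begin(&pmu_handle, event);
 *
 *		if (!buf)
 *			return;			// no AUX buffer or no space
 *		hw_start_trace(buf, pmu_handle.head, pmu_handle.size);
 *	}
 *
 *	static void my_pmu_stop(struct perf_event *event, int flags)
 *	{
 *		unsigned long written = hw_stop_trace();	// implies (B)
 *
 *		perf_aux_output_end(&pmu_handle, written);
 *	}
 */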
static __always_inline bool rb_need_aux_wakeup(struct perf_buffer *rb)
{
	if (rb->aux_overwrite)
		return false;

	if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
		rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
		return true;
	}

	return false;
}
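
/*
 * Worked example (illustrative numbers): with aux_watermark = 0x1000 and
 * aux_wakeup = 0x2000, a write advancing aux_head to 0x3a80 satisfies
 * 0x3a80 - 0x2000 >= 0x1000, so the consumer is woken and aux_wakeup is
 * rounded down to 0x3000; the 0xa80 residue counts toward the next wakeup.
 */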
/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 *
 * Note: this has to be called from pmu::stop() callback, as the assumption
 * of the AUX buffer management code is that after pmu::stop(), the AUX
 * transaction must be stopped and therefore drop the AUX reference count.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{
	bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
	struct perf_buffer *rb = handle->rb;
	unsigned long aux_head;

	/* in overwrite mode, driver provides aux_head via handle */
	if (rb->aux_overwrite) {
		handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;

		aux_head = handle->head;
		rb->aux_head = aux_head;
	} else {
		handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;

		aux_head = rb->aux_head;
		rb->aux_head += size;
	}

	/*
	 * Only send RECORD_AUX if we have something useful to communicate
	 *
	 * Note: the OVERWRITE records by themselves are not considered
	 * useful, as they don't communicate any *new* information,
	 * aside from the short-lived offset, that becomes history at
	 * the next event sched-in and therefore isn't useful.
	 * The userspace that needs to copy out AUX data in overwrite
	 * mode should know to use user_page::aux_head for the actual
	 * offset. So, from now on we don't output AUX records that
	 * have *only* OVERWRITE flag set.
	 */
	if (size || (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE))
		perf_event_aux_event(handle->event, aux_head, size,
				     handle->aux_flags);

	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
	if (rb_need_aux_wakeup(rb))
		wakeup = true;

	if (wakeup) {
		if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
			handle->event->pending_disable = smp_processor_id();
		perf_output_wakeup(handle);
	}

	handle->event = NULL;

	WRITE_ONCE(rb->aux_nest, 0);
	/* can't be last */
	rb_free_aux(rb);
	ring_buffer_put(rb);
}
EXPORT_SYMBOL_GPL(perf_aux_output_end);
/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
	struct perf_buffer *rb = handle->rb;

	if (size > handle->size)
		return -ENOSPC;

	rb->aux_head += size;

	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
	if (rb_need_aux_wakeup(rb)) {
		perf_output_wakeup(handle);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
	}

	handle->head = rb->aux_head;
	handle->size -= size;

	return 0;
}
EXPORT_SYMBOL_GPL(perf_aux_output_skip);
void *perf_get_aux(struct perf_output_handle *handle)
{
	/* this is only valid between perf_aux_output_begin and *_end */
	if (!handle->event)
		return NULL;

	return handle->rb->aux_priv;
}
EXPORT_SYMBOL_GPL(perf_get_aux);
/*
 * Copy out AUX data from an AUX handle.
 */
long perf_output_copy_aux(struct perf_output_handle *aux_handle,
			  struct perf_output_handle *handle,
			  unsigned long from, unsigned long to)
{
	struct perf_buffer *rb = aux_handle->rb;
	unsigned long tocopy, remainder, len = 0;
	void *addr;

	from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
	to &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;

	do {
		tocopy = PAGE_SIZE - offset_in_page(from);
		if (to > from)
			tocopy = min(tocopy, to - from);
		if (!tocopy)
			break;

		addr = rb->aux_pages[from >> PAGE_SHIFT];
		addr += offset_in_page(from);

		remainder = perf_output_copy(handle, addr, tocopy);
		if (remainder)
			return -EFAULT;

		len += tocopy;
		from += tocopy;
		from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
	} while (to != from);

	return len;
}
#define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	if (order > MAX_ORDER)
		order = MAX_ORDER;

	do {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
	} while (!page && order--);

	if (page && order) {
		/*
		 * Communicate the allocation size to the driver:
		 * if we managed to secure a high-order allocation,
		 * set its first page's private to this order;
		 * !PagePrivate(page) means it's just a normal page.
		 */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}
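
/*
 * For illustration (an assumed driver-side pattern, not part of this
 * file): a PMU's setup_aux() callback can recover the chunk size encoded
 * above from the first page of each chunk:
 *
 *	struct page *page = virt_to_page(pages[i]);
 *	int order = PagePrivate(page) ? page_private(page) : 0;
 *	size_t chunk_bytes = PAGE_SIZE << order;
 *
 * which is typically how SG-limited drivers size their hardware buffer
 * descriptors.
 */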
static void rb_free_aux_page(struct perf_buffer *rb, int idx)
{
	struct page *page = virt_to_page(rb->aux_pages[idx]);

	ClearPagePrivate(page);
	page->mapping = NULL;
	__free_page(page);
}
static void __rb_free_aux(struct perf_buffer *rb)
{
	int pg;

	/*
	 * Should never happen, the last reference should be dropped from
	 * perf_mmap_close() path, which first stops aux transactions (which
	 * in turn are the atomic holders of aux_refcount) and then does the
	 * last rb_free_aux().
	 */
	WARN_ON_ONCE(in_atomic());

	if (rb->aux_priv) {
		rb->free_aux(rb->aux_priv);
		rb->free_aux = NULL;
		rb->aux_priv = NULL;
	}

	if (rb->aux_nr_pages) {
		for (pg = 0; pg < rb->aux_nr_pages; pg++)
			rb_free_aux_page(rb, pg);

		kfree(rb->aux_pages);
		rb->aux_nr_pages = 0;
	}
}
int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
	int ret = -ENOMEM, max_order;

	if (!has_aux(event))
		return -EOPNOTSUPP;

	if (!overwrite) {
		/*
		 * Watermark defaults to half the buffer, and so does the
		 * max_order, to aid PMU drivers in double buffering.
		 */
		if (!watermark)
			watermark = nr_pages << (PAGE_SHIFT - 1);

		/*
		 * Use aux_watermark as the basis for chunking to
		 * help PMU drivers honor the watermark.
		 */
		max_order = get_order(watermark);
	} else {
		/*
		 * We need to start with the max_order that fits in nr_pages,
		 * not the other way around, hence ilog2() and not get_order.
		 */
		max_order = ilog2(nr_pages);
		watermark = 0;
	}

	/*
	 * kcalloc_node() is unable to allocate buffer if the size is larger
	 * than: PAGE_SIZE << MAX_ORDER; directly bail out in this case.
	 */
	if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_ORDER)
		return -ENOMEM;

	rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
				     node);
	if (!rb->aux_pages)
		return -ENOMEM;

	rb->free_aux = event->pmu->free_aux;
	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
		struct page *page;
		int last, order;

		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
		page = rb_alloc_aux_page(node, order);
		if (!page)
			goto out;

		for (last = rb->aux_nr_pages + (1 << page_private(page));
		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
	}

	/*
	 * In overwrite mode, PMUs that don't support SG may not handle more
	 * than one contiguous allocation, since they rely on PMI to do double
	 * buffering. In this case, the entire buffer has to be one contiguous
	 * chunk.
	 */
	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
	    overwrite) {
		struct page *page = virt_to_page(rb->aux_pages[0]);

		if (page_private(page) != max_order)
			goto out;
	}

	rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
					     overwrite);
	if (!rb->aux_priv)
		goto out;

	ret = 0;

	/*
	 * aux_pages (and pmu driver's private data, aux_priv) will be
	 * referenced in both producer's and consumer's contexts, thus
	 * we keep a refcount here to make sure either of the two can
	 * reference them safely.
	 */
	refcount_set(&rb->aux_refcount, 1);

	rb->aux_overwrite = overwrite;
	rb->aux_watermark = watermark;

out:
	if (!ret)
		rb->aux_pgoff = pgoff;
	else
		__rb_free_aux(rb);

	return ret;
}
void rb_free_aux(struct perf_buffer *rb)
{
	if (refcount_dec_and_test(&rb->aux_refcount))
		__rb_free_aux(rb);
}
#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}
static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}
static void perf_mmap_free_page(void *addr)
{
	struct page *page = virt_to_page(addr);

	page->mapping = NULL;
	__free_page(page);
}
struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer *rb;
	unsigned long size;
	int i, node;

	size = sizeof(struct perf_buffer);
	size += nr_pages * sizeof(void *);

	if (order_base_2(size) > PAGE_SHIFT+MAX_ORDER)
		goto fail;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	rb = kzalloc_node(size, GFP_KERNEL, node);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		perf_mmap_free_page(rb->data_pages[i]);

	perf_mmap_free_page(rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}
void rb_free(struct perf_buffer *rb)
{
	int i;

	perf_mmap_free_page(rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page(rb->data_pages[i]);
	kfree(rb);
}
#else
static struct page *
__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
	/* The '>' counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}
static void rb_free_work(struct work_struct *work)
{
	struct perf_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct perf_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct perf_buffer *rb)
{
	schedule_work(&rb->work);
}
struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer *rb;
	unsigned long size;
	void *all_buf;
	int node;

	size = sizeof(struct perf_buffer);
	size += sizeof(void *);

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	rb = kzalloc_node(size, GFP_KERNEL, node);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	if (nr_pages) {
		rb->nr_pages = 1;
		rb->page_order = ilog2(nr_pages);
	}

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif
struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
	if (rb->aux_nr_pages) {
		/* above AUX space */
		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
			return NULL;

		/* AUX space */
		if (pgoff >= rb->aux_pgoff) {
			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
			return virt_to_page(rb->aux_pages[aux_pgoff]);
		}
	}

	return __perf_mmap_to_page(rb, pgoff);
}
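
/*
 * The resulting mmap() layout, sketched for illustration (page offsets
 * within the perf fd mapping; the AUX area is mapped by a second mmap()
 * call at aux_pgoff, which userspace typically places right after the
 * data area via user_page::aux_offset):
 *
 *	pgoff 0				user page (perf_event_mmap_page)
 *	pgoff 1 .. nr_pages		data pages (power-of-two count)
 *	pgoff aux_pgoff ..
 *	      aux_pgoff+aux_nr_pages	AUX area pages
 */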