/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>
#include <linux/nospec.h>

#include "internal.h"
static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}
/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}
static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	/*
	 * In order to avoid publishing a head value that goes backwards,
	 * we must ensure the load of @rb->head happens after we've
	 * incremented @rb->nest.
	 *
	 * Otherwise we can observe a @rb->head value before one published
	 * by an IRQ/NMI happening between the load and the increment.
	 */
	barrier();
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here and advance @rb->head, causing our
	 * load above to be stale.
	 */

	/*
	 * If this isn't the outermost nesting, we don't have to update
	 * @rb->user_page->data_head.
	 */
	if (local_read(&rb->nest) > 1) {
		local_dec(&rb->nest);
		goto out;
	}

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load of
	 * the ->data_tail and the stores of $data. In case ->data_tail
	 * indicates there is no room in the buffer to store $data we do not.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	rb->user_page->data_head = head;

	/*
	 * We must publish the head before decrementing the nest count,
	 * otherwise an IRQ/NMI can publish a more recent head value and our
	 * write will (temporarily) publish a stale value.
	 */
	barrier();
	local_set(&rb->nest, 0);

	/*
	 * Ensure we decrement @rb->nest before we validate the @rb->head.
	 * Otherwise we cannot be sure we caught the 'last' nested update.
	 */
	barrier();
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
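
/*
 * Illustrative sketch, not part of the original file: the matching
 * userspace consumer for the barrier diagram above.  The @up pointer and
 * the consume_record() helper are assumptions made for illustration; the
 * real reader lives in tools/perf.
 *
 *	struct perf_event_mmap_page *up = mmap_base;
 *	u64 tail = up->data_tail;
 *	u64 head = READ_ONCE(up->data_head);
 *
 *	smp_rmb();				(C), matches B
 *	while (tail != head)
 *		tail += consume_record(up, tail);
 *	smp_mb();				(D), matches A
 *	up->data_tail = tail;
 */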
int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(!rb->nr_pages))
		goto out;

	handle->rb    = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = READ_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite &&
		    unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
			goto fail;

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below. Since the @tail
		 * load is required to compute the branch to fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */

		head += size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
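
/*
 * Illustrative sketch, not part of the original file: the shape of a
 * producer built on the handle API above.  The record here is a bare
 * header for brevity; real callers such as perf_event_output() in
 * kernel/events/core.c emit a full PERF_RECORD_SAMPLE.
 */
static inline void perf_output_example_record(struct perf_event *event)
{
	struct perf_output_handle handle;
	struct perf_event_header header = {
		.type	= PERF_RECORD_SAMPLE,
		.misc	= 0,
		.size	= sizeof(header),
	};

	/* reserve header.size bytes; on failure rb->lost has been bumped */
	if (perf_output_begin(&handle, event, header.size))
		return;

	perf_output_put(&handle, header);	/* copy the payload in */
	perf_output_end(&handle);		/* publish data_head, maybe wake up */
}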
unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
static void rb_irq_work(struct irq_work *work);

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);
	init_irq_work(&rb->irq_work, rb_irq_work);
}
static void ring_buffer_put_async(struct ring_buffer *rb)
{
	if (!atomic_dec_and_test(&rb->refcount))
		return;

	rb->rcu_head.next = (void *)rb;
	irq_work_queue(&rb->irq_work);
}
/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct ring_buffer *rb;

	if (output_event->parent)
		output_event = output_event->parent;

	/*
	 * Since this will typically be open across pmu::add/pmu::del, we
	 * grab ring_buffer's refcount instead of holding rcu read lock
	 * to make sure it doesn't disappear under us.
	 */
	rb = ring_buffer_get(output_event);
	if (!rb)
		return NULL;

	if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
		goto err;

	/*
	 * If rb::aux_mmap_count is zero (and rb_has_aux() above went through),
	 * the aux buffer is in perf_mmap_close(), about to get freed.
	 */
	if (!atomic_read(&rb->aux_mmap_count))
		goto err_put;

	/*
	 * Nesting is not supported for AUX area, make sure nested
	 * writers are caught early
	 */
	if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
		goto err_put;

	aux_head = local_read(&rb->aux_head);

	handle->rb = rb;
	handle->event = event;
	handle->head = aux_head;
	handle->size = 0;

	/*
	 * In overwrite mode, AUX data stores do not depend on aux_tail,
	 * therefore (A) control dependency barrier does not exist. The
	 * (B) <-> (C) ordering is still observed by the pmu driver.
	 */
	if (!rb->aux_overwrite) {
		aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
		handle->wakeup = local_read(&rb->aux_wakeup) + rb->aux_watermark;
		if (aux_head - aux_tail < perf_aux_size(rb))
			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

		/*
		 * handle->size computation depends on aux_tail load; this forms a
		 * control dependency barrier separating aux_tail load from aux data
		 * store that will be enabled on successful return
		 */
		if (!handle->size) { /* A, matches D */
			event->pending_disable = 1;
			perf_output_wakeup(handle);
			local_set(&rb->aux_nest, 0);
			goto err_put;
		}
	}

	return handle->rb->aux_priv;

err_put:
	rb_free_aux(rb);

err:
	ring_buffer_put_async(rb);
	handle->event = NULL;

	return NULL;
}
/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
			 bool truncated)
{
	struct ring_buffer *rb = handle->rb;
	bool wakeup = truncated;
	unsigned long aux_head;
	u64 flags = 0;

	if (truncated)
		flags |= PERF_AUX_FLAG_TRUNCATED;

	/* in overwrite mode, driver provides aux_head via handle */
	if (rb->aux_overwrite) {
		flags |= PERF_AUX_FLAG_OVERWRITE;
		aux_head = handle->head;
		local_set(&rb->aux_head, aux_head);
	} else {
		aux_head = local_read(&rb->aux_head);
		local_add(size, &rb->aux_head);
	}

	if (size || flags) {
		/*
		 * Only send RECORD_AUX if we have something useful to communicate
		 */
		perf_event_aux_event(handle->event, aux_head, size, flags);
	}

	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);

	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
		wakeup = true;
		local_add(rb->aux_watermark, &rb->aux_wakeup);
	}

	if (wakeup) {
		if (truncated)
			handle->event->pending_disable = 1;
		perf_output_wakeup(handle);
	}

	handle->event = NULL;

	local_set(&rb->aux_nest, 0);
	rb_free_aux(rb);
	ring_buffer_put_async(rb);
}
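
/*
 * Illustrative sketch, not part of the original file: how a PMU driver is
 * expected to bracket a hardware capture with the AUX handle API, per the
 * comment above perf_aux_output_begin().  The hardware programming is only
 * hinted at in comments and the final @size is an assumption.
 */
static inline void perf_aux_example_capture(struct perf_output_handle *handle,
					    struct perf_event *event)
{
	unsigned long size = 0;
	void *buf = perf_aux_output_begin(handle, event);

	if (!buf)
		return;		/* no AUX buffer, or no room left in it */

	/*
	 * Point the hardware at buf + handle->head for at most handle->size
	 * bytes, run the capture, then order the resulting data stores (this
	 * is (B), which the driver owns) before committing.  @size is the
	 * number of bytes the hardware actually wrote.
	 */
	perf_aux_output_end(handle, size, false);
}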
/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long aux_head;

	if (size > handle->size)
		return -ENOSPC;

	local_add(size, &rb->aux_head);

	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
		perf_output_wakeup(handle);
		local_add(rb->aux_watermark, &rb->aux_wakeup);
		handle->wakeup = local_read(&rb->aux_wakeup) +
				 rb->aux_watermark;
	}

	handle->head = aux_head;
	handle->size -= size;

	return 0;
}
void *perf_get_aux(struct perf_output_handle *handle)
{
	/* this is only valid between perf_aux_output_begin and *_end */
	if (!handle->event)
		return NULL;

	return handle->rb->aux_priv;
}
#define PERF_AUX_GFP  (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	if (order > MAX_ORDER)
		order = MAX_ORDER;

	do {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
	} while (!page && order--);

	if (page && order) {
		/*
		 * Communicate the allocation size to the driver:
		 * if we managed to secure a high-order allocation,
		 * set its first page's private to this order;
		 * !PagePrivate(page) means it's just a normal page.
		 */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}
static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
	struct page *page = virt_to_page(rb->aux_pages[idx]);

	ClearPagePrivate(page);
	page->mapping = NULL;
	__free_page(page);
}
static void __rb_free_aux(struct ring_buffer *rb)
{
	int pg;

	if (rb->aux_priv) {
		rb->free_aux(rb->aux_priv);
		rb->free_aux = NULL;
		rb->aux_priv = NULL;
	}

	if (rb->aux_nr_pages) {
		for (pg = 0; pg < rb->aux_nr_pages; pg++)
			rb_free_aux_page(rb, pg);

		kfree(rb->aux_pages);
		rb->aux_nr_pages = 0;
	}
}
int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
	int ret = -ENOMEM, max_order = 0;

	if (!has_aux(event))
		return -ENOTSUPP;

	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
		/*
		 * We need to start with the max_order that fits in nr_pages,
		 * not the other way around, hence ilog2() and not get_order.
		 */
		max_order = ilog2(nr_pages);

		/*
		 * PMU requests more than one contiguous chunks of memory
		 * for SW double buffering
		 */
		if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
		    !overwrite) {
			if (!max_order)
				return -EINVAL;

			max_order--;
		}
	}

	rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
	if (!rb->aux_pages)
		return -ENOMEM;

	rb->free_aux = event->pmu->free_aux;
	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
		struct page *page;
		int last, order;

		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
		page = rb_alloc_aux_page(node, order);
		if (!page)
			goto out;

		for (last = rb->aux_nr_pages + (1 << page_private(page));
		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
	}

	/*
	 * In overwrite mode, PMUs that don't support SG may not handle more
	 * than one contiguous allocation, since they rely on PMI to do double
	 * buffering. In this case, the entire buffer has to be one contiguous
	 * chunk.
	 */
	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
	    overwrite) {
		struct page *page = virt_to_page(rb->aux_pages[0]);

		if (page_private(page) != max_order)
			goto out;
	}

	rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
					     overwrite);
	if (!rb->aux_priv)
		goto out;

	ret = 0;

	/*
	 * aux_pages (and pmu driver's private data, aux_priv) will be
	 * referenced in both producer's and consumer's contexts, thus
	 * we keep a refcount here to make sure either of the two can
	 * reference them safely.
	 */
	atomic_set(&rb->aux_refcount, 1);

	rb->aux_overwrite = overwrite;
	rb->aux_watermark = watermark;

	if (!rb->aux_watermark && !rb->aux_overwrite)
		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
	if (!ret)
		rb->aux_pgoff = pgoff;
	else
		__rb_free_aux(rb);

	return ret;
}
void rb_free_aux(struct ring_buffer *rb)
{
	if (atomic_dec_and_test(&rb->aux_refcount))
		irq_work_queue(&rb->irq_work);
}
static void rb_irq_work(struct irq_work *work)
{
	struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);

	if (!atomic_read(&rb->aux_refcount))
		__rb_free_aux(rb);

	if (rb->rcu_head.next == (void *)rb)
		call_rcu(&rb->rcu_head, rb_free_rcu);
}
#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}
static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
		goto fail;

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

fail_user_page:
	free_page((unsigned long)rb->user_page);

fail_rb:
	kfree(rb);

fail:
	return NULL;
}
static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}
static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}
static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	rb->page_order = ilog2(nr_pages);
	rb->nr_pages = !!nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif
struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (rb->aux_nr_pages) {
		/* above AUX space */
		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
			return NULL;

		/* AUX space */
		if (pgoff >= rb->aux_pgoff) {
			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
			return virt_to_page(rb->aux_pages[aux_pgoff]);
		}
	}

	return __perf_mmap_to_page(rb, pgoff);
}
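
/*
 * Illustrative layout note, not from the original file: the page offsets
 * perf_mmap_to_page() resolves for a buffer with nr_pages data pages and
 * aux_nr_pages AUX pages mapped at aux_pgoff:
 *
 *	pgoff 0					user_page
 *	pgoff 1 .. nr_pages			data_pages[pgoff - 1]
 *	pgoff aux_pgoff ..
 *	      aux_pgoff + aux_nr_pages - 1	aux_pages[pgoff - aux_pgoff]
 */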