// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Ring buffer operations.
 *
 * Copyright (C) 2020 Facebook, Inc.
 */
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <linux/err.h>
#include <linux/bpf.h>
#include <asm/barrier.h>
#include <sys/mman.h>
#include <sys/epoll.h>
#include <time.h>

#include "libbpf.h"
#include "libbpf_internal.h"
#include "bpf.h"
struct ring {
	ring_buffer_sample_fn sample_cb;
	void *ctx;
	void *data;
	unsigned long *consumer_pos;
	unsigned long *producer_pos;
	unsigned long mask;
	int map_fd;
};

struct ring_buffer {
	struct epoll_event *events;
	struct ring **rings;
	size_t page_size;
	int epoll_fd;
	int ring_cnt;
};

struct user_ring_buffer {
	struct epoll_event event;
	unsigned long *consumer_pos;
	unsigned long *producer_pos;
	void *data;
	unsigned long mask;
	size_t page_size;
	int map_fd;
	int epoll_fd;
};

/* 8-byte ring buffer header structure */
struct ringbuf_hdr {
	__u32 len;
	__u32 pad;
};
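/* Illustrative note (not from the original source): each sample in the data
 * area is preceded by this header. The top two bits of 'len' are flags:
 * BPF_RINGBUF_BUSY_BIT marks a reservation that has not been committed yet,
 * and BPF_RINGBUF_DISCARD_BIT marks a committed sample that consumers must
 * skip; the remaining bits hold the payload length in bytes.
 */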
static void ringbuf_free_ring(struct ring_buffer *rb, struct ring *r)
{
	if (r->consumer_pos) {
		munmap(r->consumer_pos, rb->page_size);
		r->consumer_pos = NULL;
	}
	if (r->producer_pos) {
		munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1));
		r->producer_pos = NULL;
	}

	free(r);
}
/* Add extra RINGBUF maps to this ring buffer manager */
int ring_buffer__add(struct ring_buffer *rb, int map_fd,
		     ring_buffer_sample_fn sample_cb, void *ctx)
{
	struct bpf_map_info info;
	__u32 len = sizeof(info);
	struct epoll_event *e;
	struct ring *r;
	__u64 mmap_sz;
	void *tmp;
	int err;

	memset(&info, 0, sizeof(info));

	err = bpf_map_get_info_by_fd(map_fd, &info, &len);
	if (err) {
		err = -errno;
		pr_warn("ringbuf: failed to get map info for fd=%d: %d\n",
			map_fd, err);
		return libbpf_err(err);
	}

	if (info.type != BPF_MAP_TYPE_RINGBUF) {
		pr_warn("ringbuf: map fd=%d is not BPF_MAP_TYPE_RINGBUF\n",
			map_fd);
		return libbpf_err(-EINVAL);
	}

	tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));
	if (!tmp)
		return libbpf_err(-ENOMEM);
	rb->rings = tmp;

	tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events));
	if (!tmp)
		return libbpf_err(-ENOMEM);
	rb->events = tmp;

	r = calloc(1, sizeof(*r));
	if (!r)
		return libbpf_err(-ENOMEM);
	rb->rings[rb->ring_cnt] = r;

	r->map_fd = map_fd;
	r->sample_cb = sample_cb;
	r->ctx = ctx;
	r->mask = info.max_entries - 1;

	/* Map writable consumer page */
	tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
	if (tmp == MAP_FAILED) {
		err = -errno;
		pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
			map_fd, err);
		goto err_out;
	}
	r->consumer_pos = tmp;

	/* Map read-only producer page and data pages. We map twice as big
	 * data size to allow simple reading of samples that wrap around the
	 * end of a ring buffer. See kernel implementation for details.
	 */
	mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
	if (mmap_sz != (__u64)(size_t)mmap_sz) {
		err = -E2BIG;
		pr_warn("ringbuf: ring buffer size (%u) is too big\n", info.max_entries);
		goto err_out;
	}
	tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size);
	if (tmp == MAP_FAILED) {
		err = -errno;
		pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %d\n",
			map_fd, err);
		goto err_out;
	}
	r->producer_pos = tmp;
	r->data = tmp + rb->page_size;

	e = &rb->events[rb->ring_cnt];
	memset(e, 0, sizeof(*e));

	e->events = EPOLLIN;
	e->data.fd = rb->ring_cnt;
	if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) {
		err = -errno;
		pr_warn("ringbuf: failed to epoll add map fd=%d: %d\n",
			map_fd, err);
		goto err_out;
	}

	rb->ring_cnt++;
	return 0;

err_out:
	ringbuf_free_ring(rb, r);
	return libbpf_err(err);
}
void ring_buffer__free(struct ring_buffer *rb)
{
	int i;

	if (!rb)
		return;

	for (i = 0; i < rb->ring_cnt; ++i)
		ringbuf_free_ring(rb, rb->rings[i]);
	if (rb->epoll_fd >= 0)
		close(rb->epoll_fd);

	free(rb->events);
	free(rb->rings);
	free(rb);
}

struct ring_buffer *
ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
		 const struct ring_buffer_opts *opts)
{
	struct ring_buffer *rb;
	int err;

	if (!OPTS_VALID(opts, ring_buffer_opts))
		return errno = EINVAL, NULL;

	rb = calloc(1, sizeof(*rb));
	if (!rb)
		return errno = ENOMEM, NULL;

	rb->page_size = getpagesize();

	rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	if (rb->epoll_fd < 0) {
		err = -errno;
		pr_warn("ringbuf: failed to create epoll instance: %d\n", err);
		goto err_out;
	}

	err = ring_buffer__add(rb, map_fd, sample_cb, ctx);
	if (err)
		goto err_out;

	return rb;

err_out:
	ring_buffer__free(rb);
	return errno = -err, NULL;
}
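/* A minimal consumer sketch (illustrative only; handle_event, obj, and the
 * map name are hypothetical application code, not part of this file):
 *
 *	static int handle_event(void *ctx, void *data, size_t size)
 *	{
 *		// process one committed sample; return <0 to abort consumption
 *		return 0;
 *	}
 *	...
 *	int map_fd = bpf_object__find_map_fd_by_name(obj, "events");
 *	struct ring_buffer *rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
 *
 *	if (!rb)
 *		return -errno;
 *	while (ring_buffer__poll(rb, 100) >= 0)
 *		; // consume in 100ms poll intervals until a callback fails
 *	ring_buffer__free(rb);
 */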
static inline int roundup_len(__u32 len)
{
	/* clear out top 2 bits (discard and busy, if set) */
	len <<= 2;
	len >>= 2;
	/* add length prefix */
	len += BPF_RINGBUF_HDR_SZ;
	/* round up to 8 byte alignment */
	return (len + 7) / 8 * 8;
}
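/* Worked example (illustrative, not from the original source): a committed
 * sample with len = 10 has the busy and discard bits clear, so
 * roundup_len(10) = (10 + 8 + 7) / 8 * 8 = 24: the consumer advances past
 * the 8-byte header, 10 payload bytes, and 6 bytes of alignment padding.
 * Note that consumer/producer positions only ever grow; (pos & mask) picks
 * the byte offset in the data area, and the double mmap of the data pages
 * lets a sample that straddles the end of the ring be read linearly.
 */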
static int64_t ringbuf_process_ring(struct ring *r)
{
	int *len_ptr, len, err;
	/* 64-bit to avoid overflow in case of extreme application behavior */
	int64_t cnt = 0;
	unsigned long cons_pos, prod_pos;
	bool got_new_data;
	void *sample;

	cons_pos = smp_load_acquire(r->consumer_pos);
	do {
		got_new_data = false;
		prod_pos = smp_load_acquire(r->producer_pos);
		while (cons_pos < prod_pos) {
			len_ptr = r->data + (cons_pos & r->mask);
			len = smp_load_acquire(len_ptr);

			/* sample not committed yet, bail out for now */
			if (len & BPF_RINGBUF_BUSY_BIT)
				goto done;

			got_new_data = true;
			cons_pos += roundup_len(len);

			if ((len & BPF_RINGBUF_DISCARD_BIT) == 0) {
				sample = (void *)len_ptr + BPF_RINGBUF_HDR_SZ;
				err = r->sample_cb(r->ctx, sample, len);
				if (err < 0) {
					/* update consumer pos and bail out */
					smp_store_release(r->consumer_pos,
							  cons_pos);
					return err;
				}
				cnt++;
			}

			smp_store_release(r->consumer_pos, cons_pos);
		}
	} while (got_new_data);
done:
	return cnt;
}
/* Consume available ring buffer(s) data without event polling.
 * Returns number of records consumed across all registered ring buffers (or
 * INT_MAX, whichever is less), or negative number if any of the callbacks
 * return error.
 */
int ring_buffer__consume(struct ring_buffer *rb)
{
	int64_t err, res = 0;
	int i;

	for (i = 0; i < rb->ring_cnt; i++) {
		struct ring *ring = rb->rings[i];

		err = ringbuf_process_ring(ring);
		if (err < 0)
			return libbpf_err(err);
		res += err;
	}
	if (res > INT_MAX)
		return INT_MAX;
	return res;
}
/* Poll for available data and consume records, if any are available.
 * Returns number of records consumed (or INT_MAX, whichever is less), or
 * negative number, if any of the registered callbacks returned error.
 */
int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
{
	int i, cnt;
	int64_t err, res = 0;

	cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);
	if (cnt < 0)
		return libbpf_err(-errno);

	for (i = 0; i < cnt; i++) {
		__u32 ring_id = rb->events[i].data.fd;
		struct ring *ring = rb->rings[ring_id];

		err = ringbuf_process_ring(ring);
		if (err < 0)
			return libbpf_err(err);
		res += err;
	}
	if (res > INT_MAX)
		return INT_MAX;
	return res;
}
/* Get an fd that can be used to sleep until data is available in the ring(s) */
int ring_buffer__epoll_fd(const struct ring_buffer *rb)
{
	return rb->epoll_fd;
}

struct ring *ring_buffer__ring(struct ring_buffer *rb, unsigned int idx)
{
	if (idx >= rb->ring_cnt)
		return errno = ERANGE, NULL;

	return rb->rings[idx];
}
unsigned long ring__consumer_pos(const struct ring *r)
{
	/* Synchronizes with smp_store_release() in ringbuf_process_ring(). */
	return smp_load_acquire(r->consumer_pos);
}

unsigned long ring__producer_pos(const struct ring *r)
{
	/* Synchronizes with smp_store_release() in __bpf_ringbuf_reserve() in
	 * the kernel.
	 */
	return smp_load_acquire(r->producer_pos);
}

size_t ring__avail_data_size(const struct ring *r)
{
	unsigned long cons_pos, prod_pos;

	cons_pos = ring__consumer_pos(r);
	prod_pos = ring__producer_pos(r);
	return prod_pos - cons_pos;
}

size_t ring__size(const struct ring *r)
{
	return r->mask + 1;
}

int ring__map_fd(const struct ring *r)
{
	return r->map_fd;
}

int ring__consume(struct ring *r)
{
	int64_t res;

	res = ringbuf_process_ring(r);
	if (res < 0)
		return libbpf_err(res);

	return res > INT_MAX ? INT_MAX : res;
}
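/* Illustrative single-ring usage (hypothetical application code, assuming a
 * manager 'rb' with at least one registered ring):
 *
 *	struct ring *r = ring_buffer__ring(rb, 0);
 *	int n = -1;
 *
 *	if (r && ring__avail_data_size(r) > 0)
 *		n = ring__consume(r);	// consume only this ring's records
 */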
static void user_ringbuf_unmap_ring(struct user_ring_buffer *rb)
{
	if (rb->consumer_pos) {
		munmap(rb->consumer_pos, rb->page_size);
		rb->consumer_pos = NULL;
	}
	if (rb->producer_pos) {
		munmap(rb->producer_pos, rb->page_size + 2 * (rb->mask + 1));
		rb->producer_pos = NULL;
	}
}

void user_ring_buffer__free(struct user_ring_buffer *rb)
{
	if (!rb)
		return;

	user_ringbuf_unmap_ring(rb);

	if (rb->epoll_fd >= 0)
		close(rb->epoll_fd);

	free(rb);
}
static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
{
	struct bpf_map_info info;
	__u32 len = sizeof(info);
	__u64 mmap_sz;
	void *tmp;
	struct epoll_event *rb_epoll;
	int err;

	memset(&info, 0, sizeof(info));

	err = bpf_map_get_info_by_fd(map_fd, &info, &len);
	if (err) {
		err = -errno;
		pr_warn("user ringbuf: failed to get map info for fd=%d: %d\n", map_fd, err);
		return err;
	}

	if (info.type != BPF_MAP_TYPE_USER_RINGBUF) {
		pr_warn("user ringbuf: map fd=%d is not BPF_MAP_TYPE_USER_RINGBUF\n", map_fd);
		return -EINVAL;
	}

	rb->map_fd = map_fd;
	rb->mask = info.max_entries - 1;

	/* Map read-only consumer page */
	tmp = mmap(NULL, rb->page_size, PROT_READ, MAP_SHARED, map_fd, 0);
	if (tmp == MAP_FAILED) {
		err = -errno;
		pr_warn("user ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
			map_fd, err);
		return err;
	}
	rb->consumer_pos = tmp;

	/* Map read-write the producer page and data pages. We map the data
	 * region as twice the total size of the ring buffer to allow the
	 * simple reading and writing of samples that wrap around the end of
	 * the buffer. See the kernel implementation for details.
	 */
	mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
	if (mmap_sz != (__u64)(size_t)mmap_sz) {
		pr_warn("user ringbuf: ring buf size (%u) is too big\n", info.max_entries);
		return -E2BIG;
	}
	tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
		   map_fd, rb->page_size);
	if (tmp == MAP_FAILED) {
		err = -errno;
		pr_warn("user ringbuf: failed to mmap data pages for map fd=%d: %d\n",
			map_fd, err);
		return err;
	}

	rb->producer_pos = tmp;
	rb->data = tmp + rb->page_size;

	rb_epoll = &rb->event;
	rb_epoll->events = EPOLLOUT;
	if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, rb_epoll) < 0) {
		err = -errno;
		pr_warn("user ringbuf: failed to epoll add map fd=%d: %d\n", map_fd, err);
		return err;
	}

	return 0;
}
struct user_ring_buffer *
user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts)
{
	struct user_ring_buffer *rb;
	int err;

	if (!OPTS_VALID(opts, user_ring_buffer_opts))
		return errno = EINVAL, NULL;

	rb = calloc(1, sizeof(*rb));
	if (!rb)
		return errno = ENOMEM, NULL;

	rb->page_size = getpagesize();

	rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	if (rb->epoll_fd < 0) {
		err = -errno;
		pr_warn("user ringbuf: failed to create epoll instance: %d\n", err);
		goto err_out;
	}

	err = user_ringbuf_map(rb, map_fd);
	if (err)
		goto err_out;

	return rb;

err_out:
	user_ring_buffer__free(rb);
	return errno = -err, NULL;
}
static void user_ringbuf_commit(struct user_ring_buffer *rb, void *sample, bool discard)
{
	__u32 new_len;
	struct ringbuf_hdr *hdr;
	uintptr_t hdr_offset;

	hdr_offset = rb->mask + 1 + (sample - rb->data) - BPF_RINGBUF_HDR_SZ;
	hdr = rb->data + (hdr_offset & rb->mask);

	new_len = hdr->len & ~BPF_RINGBUF_BUSY_BIT;
	if (discard)
		new_len |= BPF_RINGBUF_DISCARD_BIT;

	/* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in
	 * the kernel.
	 */
	__atomic_exchange_n(&hdr->len, new_len, __ATOMIC_ACQ_REL);
}

void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample)
{
	user_ringbuf_commit(rb, sample, true);
}

void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample)
{
	user_ringbuf_commit(rb, sample, false);
}
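/* Worked example for the hdr_offset math above (illustrative): with a 4 KiB
 * ring (mask = 4095) and a reservation made at prod_pos = 4088, the sample
 * pointer handed to the caller sits at offset (4088 + 8) & 4095 = 0, i.e.
 * it wrapped to the start of the data area. Commit then recovers the header
 * at (4096 + 0 - 8) & 4095 = 4088; adding mask + 1 keeps the intermediate
 * value non-negative before masking.
 */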
void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size)
{
	__u32 avail_size, total_size, max_size;
	/* 64-bit to avoid overflow in case of extreme application behavior */
	__u64 cons_pos, prod_pos;
	struct ringbuf_hdr *hdr;

	/* The top two bits are used as special flags */
	if (size & (BPF_RINGBUF_BUSY_BIT | BPF_RINGBUF_DISCARD_BIT))
		return errno = E2BIG, NULL;

	/* Synchronizes with smp_store_release() in __bpf_user_ringbuf_peek() in
	 * the kernel.
	 */
	cons_pos = smp_load_acquire(rb->consumer_pos);
	/* Synchronizes with smp_store_release() in user_ringbuf_commit() */
	prod_pos = smp_load_acquire(rb->producer_pos);

	max_size = rb->mask + 1;
	avail_size = max_size - (prod_pos - cons_pos);
	/* Round up total size to a multiple of 8. */
	total_size = (size + BPF_RINGBUF_HDR_SZ + 7) / 8 * 8;

	if (total_size > max_size)
		return errno = E2BIG, NULL;

	if (avail_size < total_size)
		return errno = ENOSPC, NULL;

	hdr = rb->data + (prod_pos & rb->mask);
	hdr->len = size | BPF_RINGBUF_BUSY_BIT;
	hdr->pad = 0;

	/* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in
	 * the kernel.
	 */
	smp_store_release(rb->producer_pos, prod_pos + total_size);

	return (void *)rb->data + ((prod_pos + BPF_RINGBUF_HDR_SZ) & rb->mask);
}
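/* Worked example (illustrative): reserving size = 24 bytes computes
 * total_size = (24 + 8 + 7) / 8 * 8 = 32, i.e. an 8-byte header plus the
 * 24-byte payload, already 8-byte aligned, so no padding is added. The
 * pointer returned to the caller skips the header:
 * data + ((prod_pos + 8) & mask).
 */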
static __u64 ns_elapsed_timespec(const struct timespec *start, const struct timespec *end)
{
	__u64 start_ns, end_ns, ns_per_s = 1000000000;

	start_ns = (__u64)start->tv_sec * ns_per_s + start->tv_nsec;
	end_ns = (__u64)end->tv_sec * ns_per_s + end->tv_nsec;

	return end_ns - start_ns;
}
void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb, __u32 size, int timeout_ms)
{
	void *sample;
	int err, ms_remaining = timeout_ms;
	struct timespec start;

	if (timeout_ms < 0 && timeout_ms != -1)
		return errno = EINVAL, NULL;

	if (timeout_ms != -1) {
		err = clock_gettime(CLOCK_MONOTONIC, &start);
		if (err)
			return NULL;
	}

	do {
		int cnt, ms_elapsed;
		struct timespec curr;
		__u64 ns_per_ms = 1000000;

		sample = user_ring_buffer__reserve(rb, size);
		if (sample)
			return sample;
		else if (errno != ENOSPC)
			return NULL;

		/* The kernel guarantees at least one event notification
		 * delivery whenever at least one sample is drained from the
		 * ring buffer in an invocation to bpf_ringbuf_drain(). Other
		 * additional events may be delivered at any time, but only one
		 * event is guaranteed per bpf_ringbuf_drain() invocation,
		 * provided that a sample is drained, and the BPF program did
		 * not pass BPF_RB_NO_WAKEUP to bpf_ringbuf_drain(). If
		 * BPF_RB_FORCE_WAKEUP is passed to bpf_ringbuf_drain(), a
		 * wakeup event will be delivered even if no samples are
		 * drained.
		 */
		cnt = epoll_wait(rb->epoll_fd, &rb->event, 1, ms_remaining);
		if (cnt < 0)
			return NULL;

		if (timeout_ms == -1)
			continue;

		err = clock_gettime(CLOCK_MONOTONIC, &curr);
		if (err)
			return NULL;

		ms_elapsed = ns_elapsed_timespec(&start, &curr) / ns_per_ms;
		ms_remaining = timeout_ms - ms_elapsed;
	} while (ms_remaining > 0);

	/* Try one more time to reserve a sample after the specified timeout has elapsed. */
	return user_ring_buffer__reserve(rb, size);
}
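/* A minimal producer sketch (illustrative only; struct my_event, obj, and
 * the map name are hypothetical application code, not part of this file):
 *
 *	struct user_ring_buffer *rb;
 *	struct my_event *e;
 *	int map_fd;
 *
 *	map_fd = bpf_object__find_map_fd_by_name(obj, "user_events");
 *	rb = user_ring_buffer__new(map_fd, NULL);
 *	e = user_ring_buffer__reserve_blocking(rb, sizeof(*e), 1000);
 *	if (!e)
 *		return -errno;	// E2BIG, ENOSPC (timed out), etc.
 *	e->type = 1;		// fill the sample in place
 *	user_ring_buffer__submit(rb, e);	// or user_ring_buffer__discard(rb, e)
 *	user_ring_buffer__free(rb);
 */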