// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
					   GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

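/* Illustrative user-space sketch (not part of this file), assuming libbpf's
 * bpf_map_create() is available: the checks above require key_size == 4 and
 * value_size <= KMALLOC_MAX_SIZE, so a plain array map is created like this:
 *
 *	#include <bpf/bpf.h>
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "vals",
 *				    sizeof(__u32),   // key_size must be 4
 *				    sizeof(__u64),   // value_size
 *				    256,             // max_entries
 *				    NULL);           // opts
 *	// map_fd < 0 with errno EINVAL if any of the checks above fail
 */
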
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1();
	u64 array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data)
			return ERR_PTR(-ENOMEM);
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

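/* Illustrative BPF-program-side sketch (not part of this file): lookups on an
 * array map return a direct pointer into array->value, so a program written
 * against a hypothetical "my_array" map can read or update the element in
 * place:
 *
 *	__u32 key = 0;
 *	__u64 *val = bpf_map_lookup_elem(&my_array, &key);
 *
 *	if (val)	// NULL when key >= max_entries
 *		__sync_fetch_and_add(val, 1);
 */
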
static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

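/* The emitted sequence above corresponds roughly to this pseudo-C, inlined at
 * the call site instead of going through the bpf_map_lookup_elem() helper:
 *
 *	r0 = *(u32 *)key;
 *	if (r0 >= max_entries)
 *		return NULL;
 *	r0 &= index_mask;		// only when Spectre v1 mitigation is on
 *	return array->value + r0 * elem_size;
 */
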
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (cpu >= nr_cpu_ids)
		return NULL;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

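/* Illustrative user-space sketch (not part of this file): a syscall lookup on
 * a per-cpu array returns one value per possible CPU, each padded to
 * round_up(value_size, 8) bytes, so the buffer must be sized accordingly
 * (libbpf's libbpf_num_possible_cpus() is assumed here):
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 *vals = calloc(ncpus, sizeof(__u64));	// value_size is 8 here
 *	__u32 key = 0;
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, vals))
 *		for (int i = 0; i < ncpus; i++)
 *			printf("cpu%d: %llu\n", i, vals[i]);
 */
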
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

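/* Illustrative user-space sketch (not part of this file): the get_next_key
 * semantics above give the usual iteration pattern, starting from a NULL (or
 * non-existing) key and stopping when -ENOENT is returned for the last slot:
 *
 *	__u32 key, next;
 *	int err = bpf_map_get_next_key(map_fd, NULL, &next);	// next == 0
 *
 *	while (!err) {
 *		key = next;
 *		// ... use key ...
 *		err = bpf_map_get_next_key(map_fd, &key, &next);
 *	}
 */
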
static void check_and_free_fields(struct bpf_array *arr, void *val)
{
	if (map_value_has_timer(&arr->map))
		bpf_timer_cancel_and_free(val + arr->map.timer_off);
	if (map_value_has_kptrs(&arr->map))
		bpf_map_free_kptrs(&arr->map, val);
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
		check_and_free_fields(array, val);
	}
	return 0;
}

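/* Illustrative user-space sketch (not part of this file): because all array
 * elements are pre-allocated, BPF_NOEXIST always fails with -EEXIST and only
 * BPF_ANY/BPF_EXIST (optionally combined with BPF_F_LOCK) make sense:
 *
 *	__u32 key = 3;
 *	__u64 val = 42;
 *
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);	// ok
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);	// -1, errno EEXIST
 */
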
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

static void array_map_free_timers(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* We don't reset or free kptr on uref dropping to zero. */
	if (!map_value_has_timer(map))
		return;

	for (i = 0; i < array->map.max_entries; i++)
		bpf_timer_cancel_and_free(array->value + array->elem_size * i +
					  map->timer_off);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	if (map_value_has_kptrs(map)) {
		for (i = 0; i < array->map.max_entries; i++)
			bpf_map_free_kptrs(map, array->value + array->elem_size * i);
		bpf_map_free_kptr_off_tab(map);
	}

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}

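/* Illustrative user-space sketch (not part of this file): a map created with
 * BPF_F_MMAPABLE can be mapped directly, avoiding a syscall per access; the
 * mapping covers at most PAGE_ALIGN(max_entries * round_up(value_size, 8))
 * bytes, as checked above:
 *
 *	size_t len = 256 * sizeof(__u64);	// max_entries * elem_size
 *	__u64 *vals = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, map_fd, 0);
 *
 *	if (vals != MAP_FAILED)
 *		vals[3] = 42;			// updates element 3 in place
 */
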
static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu **pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = v;
			size = round_up(map->value_size, 8);
			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(info->percpu_value_buf + off,
						per_cpu_ptr(pptr, cpu),
						size);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start	= bpf_array_map_seq_start,
	.next	= bpf_array_map_seq_next,
	.stop	= bpf_array_map_seq_stop,
	.show	= bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_array_map_seq_ops,
	.init_seq_private	= bpf_iter_init_array_map,
	.fini_seq_private	= bpf_iter_fini_array_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
};

static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				   void *callback_ctx, u64 flags)
{
	u32 i, key, num_elems = 0;
	struct bpf_array *array;
	bool is_percpu;
	u64 ret = 0;
	void *val;

	if (flags != 0)
		return -EINVAL;

	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	array = container_of(map, struct bpf_array, map);
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < map->max_entries; i++) {
		if (is_percpu)
			val = this_cpu_ptr(array->pptrs[i]);
		else
			val = array->value + array->elem_size * i;
		num_elems++;
		key = i;
		ret = callback_fn((u64)(long)map, (u64)(long)&key,
				  (u64)(long)val, (u64)(long)callback_ctx, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			break;
	}

	if (is_percpu)
		migrate_enable();
	return num_elems;
}

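/* Illustrative BPF-program-side sketch (not part of this file): this function
 * is the kernel backend of the bpf_for_each_map_elem() helper; a program
 * supplies a callback with the calling convention used above (names here are
 * hypothetical):
 *
 *	static long sum_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
 *	{
 *		*(__u64 *)ctx += *val;
 *		return 0;	// 0 - continue, 1 - stop iterating
 *	}
 *
 *	// in the program:
 *	//	__u64 sum = 0;
 *	//	bpf_for_each_map_elem(&my_array, sum_cb, &sum, 0);
 */
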
BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_release_uref = array_map_free_timers,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_map_compatible(map, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

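/* Illustrative sketch (not part of this file): a prog array is the backing
 * store for tail calls. User space stores a program fd at some index, and a
 * BPF program jumps to it; the fd/map names below are hypothetical:
 *
 *	// user space
 *	__u32 idx = 0;
 *	bpf_map_update_elem(jmp_table_fd, &idx, &prog_fd, BPF_ANY);
 *
 *	// BPF program
 *	bpf_tail_call(ctx, &jmp_table, 0);	// falls through if slot 0 is empty
 */
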
static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	u8 *old_addr, *new_addr, *old_bypass_addr;
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i, ret;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->tailcall_target_stable
			 *    is not active yet. The JIT will do the final fixup
			 *    before setting it stable. The various
			 *    poke->tailcall_target_stable are successively
			 *    activated, so tail call updates can arrive from here
			 *    while JIT is still finishing its final fixup for
			 *    non-activated poke entries.
			 * 3) On program teardown, the program's kallsym entry gets
			 *    removed out of RCU callback, but we can only untrack
			 *    from sleepable context, therefore bpf_arch_text_poke()
			 *    might not see that this is in BPF text section and
			 *    bails out with -EINVAL. As these are unreachable since
			 *    RCU grace period already passed, we simply skip them.
			 * 4) Also programs reaching refcount of zero while patching
			 *    is in progress is okay since we're protected under
			 *    poke_mutex and untrack the programs before the JIT
			 *    buffer is freed. When we're still in the middle of
			 *    patching and suddenly kallsyms entry of the program
			 *    gets evicted, we just skip the rest which is fine due
			 *    to point 3).
			 * 5) Any other error happening below from bpf_arch_text_poke()
			 *    is an unexpected bug.
			 */
			if (!READ_ONCE(poke->tailcall_target_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			old_bypass_addr = old ? NULL : poke->bypass_addr;
			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;

			if (new) {
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP,
							 old_addr, new_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				if (!old) {
					ret = bpf_arch_text_poke(poke->tailcall_bypass,
								 BPF_MOD_JUMP,
								 poke->bypass_addr,
								 NULL);
					BUG_ON(ret < 0 && ret != -EINVAL);
				}
			} else {
				ret = bpf_arch_text_poke(poke->tailcall_bypass,
							 BPF_MOD_JUMP,
							 old_bypass_addr,
							 poke->bypass_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				/* let other CPUs finish the execution of program
				 * so that it will not be possible to expose them
				 * to invalid nop, stack unwind, nop state
				 */
				if (!ret)
					synchronize_rcu();
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP,
							 old_addr, NULL);
				BUG_ON(ret < 0 && ret != -EINVAL);
			}
		}
	}
}

static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}

/* prog_array->aux->{type,jited} is a runtime binding.
 * Doing static check alone in the verifier is not enough.
 * Thus, prog_array_map cannot be used as an inner_map
 * and map_meta_equal is not implemented.
 */
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
	.map_btf_id = &array_map_btf_ids[0],
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		return;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

static void perf_event_fd_array_map_free(struct bpf_map *map)
{
	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

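/* Illustrative sketch (not part of this file): a perf event array holds one
 * perf event fd per CPU; BPF programs stream records into it with
 * bpf_perf_event_output(), and user space reads them back, for example via
 * libbpf's perf_buffer__new(), which fills the map for every CPU. Struct and
 * map names below are hypothetical:
 *
 *	// BPF program side
 *	struct event e = { .pid = bpf_get_current_pid_tgid() >> 32 };
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
 */
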
const struct bpf_map_ops perf_event_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = perf_event_fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
	.map_btf_id = &array_map_btf_ids[0],
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after a rcu grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
	.map_btf_id = &array_map_btf_ids[0],
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static int array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

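/* Illustrative BPF-program-side sketch (not part of this file): an array of
 * maps is declared by giving the outer map an inner map template; the inline
 * lookup emitted above then loads the inner map pointer directly. Map names
 * here are hypothetical:
 *
 *	struct inner {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} inner_map SEC(".maps");
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 8);
 *		__type(key, __u32);
 *		__array(values, struct inner);
 *	} outer_map SEC(".maps");
 */
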
const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_check_btf = map_check_no_btf,
	.map_btf_id = &array_map_btf_ids[0],
};