// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/btf_ids.h>
#include <linux/rcupdate_wait.h>

enum bpf_struct_ops_state {
	BPF_STRUCT_OPS_STATE_INIT,
	BPF_STRUCT_OPS_STATE_INUSE,
	BPF_STRUCT_OPS_STATE_TOBEFREE,
	BPF_STRUCT_OPS_STATE_READY,
};
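
/* Lifecycle of kvalue->state, as driven by the code below (summary
 * comment added for orientation):
 *
 *   INIT  -> INUSE     update_elem() on a non-BPF_F_LINK map, once
 *                      st_ops->reg() has succeeded.
 *   INIT  -> READY     update_elem() on a BPF_F_LINK map; registration
 *                      is deferred to bpf_struct_ops_link_create().
 *   INUSE -> TOBEFREE  delete_elem(), which also calls st_ops->unreg().
 */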

#define BPF_STRUCT_OPS_COMMON_VALUE			\
	refcount_t refcnt;				\
	enum bpf_struct_ops_state state

struct bpf_struct_ops_value {
	BPF_STRUCT_OPS_COMMON_VALUE;
	char data[] ____cacheline_aligned_in_smp;
};

struct bpf_struct_ops_map {
	struct bpf_map map;
	struct rcu_head rcu;
	const struct bpf_struct_ops *st_ops;
	/* protect map_update */
	struct mutex lock;
	/* links has all the bpf_links that are populated
	 * to the func ptrs of the kernel's struct
	 * (in kvalue.data).
	 */
	struct bpf_link **links;
	/* image is a page that has all the trampolines
	 * that store the func args before calling the bpf_prog.
	 * A PAGE_SIZE "image" is enough to store all trampolines for
	 * "links[]".
	 */
	void *image;
	/* uvalue->data stores the kernel struct
	 * (e.g. tcp_congestion_ops) that is more useful
	 * to userspace than the kvalue.  For example,
	 * the bpf_prog's id is stored instead of the kernel
	 * address of a func ptr.
	 */
	struct bpf_struct_ops_value *uvalue;
	/* kvalue.data stores the actual kernel's struct
	 * (e.g. tcp_congestion_ops) that will be
	 * registered to the kernel subsystem.
	 */
	struct bpf_struct_ops_value kvalue;
};

struct bpf_struct_ops_link {
	struct bpf_link link;
	struct bpf_map __rcu *map;
};

static DEFINE_MUTEX(update_mutex);

#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)

/* bpf_struct_ops_##_name (e.g. bpf_struct_ops_tcp_congestion_ops) is
 * the map's value exposed to the userspace and its btf-type-id is
 * stored at the map->btf_vmlinux_value_type_id.
 */
#define BPF_STRUCT_OPS_TYPE(_name)				\
extern struct bpf_struct_ops bpf_##_name;			\
								\
struct bpf_struct_ops_##_name {					\
	BPF_STRUCT_OPS_COMMON_VALUE;				\
	struct _name data ____cacheline_aligned_in_smp;		\
};
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
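
/* Illustrative expansion (assuming tcp_congestion_ops is listed in
 * bpf_struct_ops_types.h), showing the value type userspace reads and
 * writes through the map:
 *
 *	struct bpf_struct_ops_tcp_congestion_ops {
 *		refcount_t refcnt;
 *		enum bpf_struct_ops_state state;
 *		struct tcp_congestion_ops data ____cacheline_aligned_in_smp;
 *	};
 */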

enum {
#define BPF_STRUCT_OPS_TYPE(_name) BPF_STRUCT_OPS_TYPE_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
	__NR_BPF_STRUCT_OPS_TYPE,
};

static struct bpf_struct_ops * const bpf_struct_ops[] = {
#define BPF_STRUCT_OPS_TYPE(_name)				\
	[BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
};

const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_struct_ops_test_run,
#endif
};

static const struct btf_type *module_type;

void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
{
	s32 type_id, value_id, module_id;
	const struct btf_member *member;
	struct bpf_struct_ops *st_ops;
	const struct btf_type *t;
	char value_name[128];
	const char *mname;
	u32 i, j;

	/* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */
#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name);
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE

	module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT);
	if (module_id < 0) {
		pr_warn("Cannot find struct module in btf_vmlinux\n");
		return;
	}
	module_type = btf_type_by_id(btf, module_id);

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		st_ops = bpf_struct_ops[i];

		if (strlen(st_ops->name) + VALUE_PREFIX_LEN >= sizeof(value_name)) {
			pr_warn("struct_ops name %s is too long\n", st_ops->name);
			continue;
		}
		sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

		value_id = btf_find_by_name_kind(btf, value_name, BTF_KIND_STRUCT);
		if (value_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n", value_name);
			continue;
		}

		type_id = btf_find_by_name_kind(btf, st_ops->name, BTF_KIND_STRUCT);
		if (type_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n", st_ops->name);
			continue;
		}
		t = btf_type_by_id(btf, type_id);
		if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
			pr_warn("Cannot support #%u members in struct %s\n",
				btf_type_vlen(t), st_ops->name);
			continue;
		}

		for_each_member(j, t, member) {
			const struct btf_type *func_proto;

			mname = btf_name_by_offset(btf, member->name_off);
			if (!*mname) {
				pr_warn("anon member in struct %s is not supported\n",
					st_ops->name);
				break;
			}

			if (__btf_member_bitfield_size(t, member)) {
				pr_warn("bit field member %s in struct %s is not supported\n",
					mname, st_ops->name);
				break;
			}

			func_proto = btf_type_resolve_func_ptr(btf, member->type, NULL);
			if (func_proto &&
			    btf_distill_func_proto(log, btf, func_proto, mname,
						   &st_ops->func_models[j])) {
				pr_warn("Error in parsing func ptr %s in struct %s\n",
					mname, st_ops->name);
				break;
			}
		}

		if (j == btf_type_vlen(t)) {
			if (st_ops->init(btf)) {
				pr_warn("Error in init bpf_struct_ops %s\n",
					st_ops->name);
			} else {
				st_ops->type_id = type_id;
				st_ops->type = t;
				st_ops->value_id = value_id;
				st_ops->value_type = btf_type_by_id(btf, value_id);
			}
		}
	}
}

extern struct btf *btf_vmlinux;

static const struct bpf_struct_ops *
bpf_struct_ops_find_value(u32 value_id)
{
	unsigned int i;

	if (!value_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->value_id == value_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}

const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	unsigned int i;

	if (!type_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->type_id == type_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}

static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
					   void *next_key)
{
	if (key && *(u32 *)key == 0)
		return -ENOENT;

	*(u32 *)next_key = 0;
	return 0;
}

int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	enum bpf_struct_ops_state state;
	s64 refcnt;

	if (unlikely(*(u32 *)key != 0))
		return -ENOENT;

	kvalue = &st_map->kvalue;
	/* Pair with smp_store_release() during map_update */
	state = smp_load_acquire(&kvalue->state);
	if (state == BPF_STRUCT_OPS_STATE_INIT) {
		memset(value, 0, map->value_size);
		return 0;
	}

	/* No lock is needed.  state and refcnt do not need
	 * to be updated together under atomic context.
	 */
	uvalue = value;
	memcpy(uvalue, st_map->uvalue, map->value_size);
	uvalue->state = state;

	/* This value offers the user space a general estimate of how
	 * many sockets are still utilizing this struct_ops for TCP
	 * congestion control. The number might not be exact, but it
	 * should sufficiently meet our present goals.
	 */
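	/* Illustrative numbers: with a single user fd holding the map,
	 * refcnt == usercnt and the estimate is 0; each kernel-side user
	 * that took a ref via bpf_struct_ops_get() bumps refcnt only, so
	 * two attached sockets would read back as an estimate of 2.
	 */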
	refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
	refcount_set(&uvalue->refcnt, max_t(s64, refcnt, 0));

	return 0;
}

static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}

static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
	const struct btf_type *t = st_map->st_ops->type;
	u32 i;

	for (i = 0; i < btf_type_vlen(t); i++) {
		if (st_map->links[i]) {
			bpf_link_put(st_map->links[i]);
			st_map->links[i] = NULL;
		}
	}
}
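
/* check_zero_holes() below rejects nonzero bytes in the compiler-inserted
 * padding of a user-supplied value. Illustrative layout (not a type from
 * this file):
 *
 *	struct foo_ops {
 *		u8  flag;	(offset 0, size 1)
 *				(3-byte alignment hole: must be zero)
 *		u32 opt;	(offset 4, size 4)
 *	};
 *
 * The hole at offsets 1..3, and any tail padding after the last member,
 * must be all-zero because the value is later memcpy()-ed into kvalue
 * wholesale.
 */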

static int check_zero_holes(const struct btf_type *t, void *data)
{
	const struct btf_member *member;
	u32 i, moff, msize, prev_mend = 0;
	const struct btf_type *mtype;

	for_each_member(i, t, member) {
		moff = __btf_member_bit_offset(t, member) / 8;
		if (moff > prev_mend &&
		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
			return -EINVAL;

		mtype = btf_type_by_id(btf_vmlinux, member->type);
		mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
		if (IS_ERR(mtype))
			return PTR_ERR(mtype);
		prev_mend = moff + msize;
	}

	if (t->size > prev_mend &&
	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
		return -EINVAL;

	return 0;
}

static void bpf_struct_ops_link_release(struct bpf_link *link)
{
}

static void bpf_struct_ops_link_dealloc(struct bpf_link *link)
{
	struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link);

	kfree(tlink);
}

const struct bpf_link_ops bpf_struct_ops_link_lops = {
	.release = bpf_struct_ops_link_release,
	.dealloc = bpf_struct_ops_link_dealloc,
};

int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *image, void *image_end)
{
	u32 flags;

	tlinks[BPF_TRAMP_FENTRY].links[0] = link;
	tlinks[BPF_TRAMP_FENTRY].nr_links = 1;
	/* BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
	 * and it must be used alone.
	 */
	flags = model->ret_size > 0 ? BPF_TRAMP_F_RET_FENTRY_RET : 0;
	return arch_prepare_bpf_trampoline(NULL, image, image_end,
					   model, flags, tlinks, NULL);
}
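
/* Each func ptr member with a prog attached gets its own small trampoline
 * carved out of st_map->image: the kernel subsystem calls the trampoline
 * with the native C calling convention, and the trampoline saves the args
 * per the BTF func model and invokes the prog as an fentry.
 * BPF_TRAMP_F_RET_FENTRY_RET additionally makes the prog's return value
 * the trampoline's return value, for ops members that return non-void.
 */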

static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 flags)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops *st_ops = st_map->st_ops;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	const struct btf_member *member;
	const struct btf_type *t = st_ops->type;
	struct bpf_tramp_links *tlinks;
	void *udata, *kdata;
	int prog_fd, err;
	void *image, *image_end;
	u32 i;

	if (flags)
		return -EINVAL;
	if (*(u32 *)key != 0)
		return -E2BIG;

	err = check_zero_holes(st_ops->value_type, value);
	if (err)
		return err;

	uvalue = value;
	err = check_zero_holes(t, uvalue->data);
	if (err)
		return err;
	if (uvalue->state || refcount_read(&uvalue->refcnt))
		return -EINVAL;

	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks)
		return -ENOMEM;

	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

	mutex_lock(&st_map->lock);
	if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) {
		err = -EBUSY;
		goto unlock;
	}

	memcpy(uvalue, value, map->value_size);
	udata = &uvalue->data;
	kdata = &kvalue->data;
	image = st_map->image;
	image_end = st_map->image + PAGE_SIZE;

	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		struct bpf_tramp_link *link;
		u32 moff;

		moff = __btf_member_bit_offset(t, member) / 8;
		ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
		if (ptype == module_type) {
			if (*(void **)(udata + moff)) {
				err = -EINVAL;
				goto reset_unlock;
			}
			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
			continue;
		}

		err = st_ops->init_member(t, member, kdata, udata);
		if (err < 0)
			goto reset_unlock;
		/* The ->init_member() has handled this member */
		if (err > 0)
			continue;

		/* If st_ops->init_member does not handle it,
		 * we will only handle func ptrs and zero-ed members
		 * here. Reject everything else: all non-func-ptr
		 * members must be 0.
		 */
		if (!ptype || !btf_type_is_func_proto(ptype)) {
			u32 msize;

			mtype = btf_type_by_id(btf_vmlinux, member->type);
			mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
			if (IS_ERR(mtype)) {
				err = PTR_ERR(mtype);
				goto reset_unlock;
			}
			if (memchr_inv(udata + moff, 0, msize)) {
				err = -EINVAL;
				goto reset_unlock;
			}
			continue;
		}

		prog_fd = (int)(*(unsigned long *)(udata + moff));
		/* Similar check as the attr->attach_prog_fd */
		if (!prog_fd)
			continue;

		prog = bpf_prog_get(prog_fd);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto reset_unlock;
		}
		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
		    prog->aux->attach_btf_id != st_ops->type_id ||
		    prog->expected_attach_type != i) {
			bpf_prog_put(prog);
			err = -EINVAL;
			goto reset_unlock;
		}

		link = kzalloc(sizeof(*link), GFP_USER);
		if (!link) {
			bpf_prog_put(prog);
			err = -ENOMEM;
			goto reset_unlock;
		}
		bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
			      &bpf_struct_ops_link_lops, prog);
		st_map->links[i] = &link->link;

		err = bpf_struct_ops_prepare_trampoline(tlinks, link,
							&st_ops->func_models[i],
							image, image_end);
		if (err < 0)
			goto reset_unlock;

		*(void **)(kdata + moff) = image;
		image += err;

		/* put prog_id to udata */
		*(unsigned long *)(udata + moff) = prog->aux->id;
	}

	if (st_map->map.map_flags & BPF_F_LINK) {
		err = 0;
		if (st_ops->validate) {
			err = st_ops->validate(kdata);
			if (err)
				goto reset_unlock;
		}
		set_memory_rox((long)st_map->image, 1);
		/* Let bpf_link handle registration & unregistration.
		 *
		 * Pair with smp_load_acquire() during lookup_elem().
		 */
		smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_READY);
		goto unlock;
	}

	set_memory_rox((long)st_map->image, 1);
	err = st_ops->reg(kdata);
	if (likely(!err)) {
		/* This refcnt increment on the map here after
		 * 'st_ops->reg()' is secure since the state of the
		 * map must be set to INIT at this moment, and thus
		 * bpf_struct_ops_map_delete_elem() can't unregister
		 * or transition it to TOBEFREE concurrently.
		 */
		bpf_map_inc(map);
		/* Pair with smp_load_acquire() during lookup_elem().
		 * It ensures the above udata updates (e.g. prog->aux->id)
		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
		 */
		smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE);
		goto unlock;
	}

	/* Error during st_ops->reg(). Can happen if this struct_ops needs to be
	 * verified as a whole, after all init_member() calls. Can also happen if
	 * there was a race in registering the struct_ops (under the same name) to
	 * a sub-system through different struct_ops's maps.
	 */
	set_memory_nx((long)st_map->image, 1);
	set_memory_rw((long)st_map->image, 1);

reset_unlock:
	bpf_struct_ops_map_put_progs(st_map);
	memset(uvalue, 0, map->value_size);
	memset(kvalue, 0, map->value_size);
unlock:
	kfree(tlinks);
	mutex_unlock(&st_map->lock);
	return err;
}

static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
	enum bpf_struct_ops_state prev_state;
	struct bpf_struct_ops_map *st_map;

	st_map = (struct bpf_struct_ops_map *)map;
	if (st_map->map.map_flags & BPF_F_LINK)
		return -EOPNOTSUPP;

	prev_state = cmpxchg(&st_map->kvalue.state,
			     BPF_STRUCT_OPS_STATE_INUSE,
			     BPF_STRUCT_OPS_STATE_TOBEFREE);
	switch (prev_state) {
	case BPF_STRUCT_OPS_STATE_INUSE:
		st_map->st_ops->unreg(&st_map->kvalue.data);
		bpf_map_put(map);
		return 0;
	case BPF_STRUCT_OPS_STATE_TOBEFREE:
		return -EINPROGRESS;
	case BPF_STRUCT_OPS_STATE_INIT:
		return -ENOENT;
	default:
		WARN_ON_ONCE(1);
		/* Should never happen.  Treat it as not found. */
		return -ENOENT;
	}
}

static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
					     struct seq_file *m)
{
	void *value;
	int err;

	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		return;

	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	if (!err) {
		btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id,
				  value, m);
		seq_puts(m, "\n");
	}

	kfree(value);
}

static void __bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	if (st_map->links)
		bpf_struct_ops_map_put_progs(st_map);
	bpf_map_area_free(st_map->links);
	if (st_map->image) {
		bpf_jit_free_exec(st_map->image);
		bpf_jit_uncharge_modmem(PAGE_SIZE);
	}
	bpf_map_area_free(st_map->uvalue);
	bpf_map_area_free(st_map);
}

static void bpf_struct_ops_map_free(struct bpf_map *map)
{
	/* The struct_ops's function may switch to another struct_ops.
	 *
	 * For example, bpf_tcp_cc_x->init() may switch to
	 * another tcp_cc_y by calling
	 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
	 * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
	 * and its refcount may reach 0 which then frees its
	 * trampoline image while tcp_cc_x is still running.
	 *
	 * A vanilla rcu gp waits for all bpf-tcp-cc progs to finish;
	 * bpf-tcp-cc progs are non-sleepable.
	 * A rcu_tasks gp waits for the last few insns
	 * in the trampoline image to finish before releasing
	 * the trampoline image.
	 */
	synchronize_rcu_mult(call_rcu, call_rcu_tasks);

	__bpf_struct_ops_map_free(map);
}

static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
	    (attr->map_flags & ~BPF_F_LINK) || !attr->btf_vmlinux_value_type_id)
		return -EINVAL;
	return 0;
}

static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
	const struct bpf_struct_ops *st_ops;
	size_t st_map_size;
	struct bpf_struct_ops_map *st_map;
	const struct btf_type *t, *vt;
	struct bpf_map *map;
	int ret;

	st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
	if (!st_ops)
		return ERR_PTR(-ENOTSUPP);

	vt = st_ops->value_type;
	if (attr->value_size != vt->size)
		return ERR_PTR(-EINVAL);

	t = st_ops->type;

	st_map_size = sizeof(*st_map) +
		      /* kvalue stores the
		       * struct bpf_struct_ops_tcp_congestion_ops
		       */
		      (vt->size - sizeof(struct bpf_struct_ops_value));

	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
	if (!st_map)
		return ERR_PTR(-ENOMEM);

	st_map->st_ops = st_ops;
	map = &st_map->map;

	ret = bpf_jit_charge_modmem(PAGE_SIZE);
	if (ret) {
		__bpf_struct_ops_map_free(map);
		return ERR_PTR(ret);
	}

	st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!st_map->image) {
		/* __bpf_struct_ops_map_free() uses st_map->image as a flag
		 * for "charged or not". In this case, we need to uncharge
		 * here.
		 */
		bpf_jit_uncharge_modmem(PAGE_SIZE);
		__bpf_struct_ops_map_free(map);
		return ERR_PTR(-ENOMEM);
	}
	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
	st_map->links =
		bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_link *),
				   NUMA_NO_NODE);
	if (!st_map->uvalue || !st_map->links) {
		__bpf_struct_ops_map_free(map);
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&st_map->lock);
	set_vm_flush_reset_perms(st_map->image);
	bpf_map_init_from_attr(map, attr);

	return map;
}

static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops *st_ops = st_map->st_ops;
	const struct btf_type *vt = st_ops->value_type;
	u64 usage;
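
	/* Itemized to mirror what bpf_struct_ops_map_alloc() charges:
	 * st_map with the kvalue flexible array, the uvalue buffer,
	 * the links array and the PAGE_SIZE trampoline image.
	 */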
	usage = sizeof(*st_map) +
			vt->size - sizeof(struct bpf_struct_ops_value);
	usage += vt->size;
	usage += btf_type_vlen(vt) * sizeof(struct bpf_link *);
	usage += PAGE_SIZE;
	return usage;
}

BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
const struct bpf_map_ops bpf_struct_ops_map_ops = {
	.map_alloc_check = bpf_struct_ops_map_alloc_check,
	.map_alloc = bpf_struct_ops_map_alloc,
	.map_free = bpf_struct_ops_map_free,
	.map_get_next_key = bpf_struct_ops_map_get_next_key,
	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
	.map_delete_elem = bpf_struct_ops_map_delete_elem,
	.map_update_elem = bpf_struct_ops_map_update_elem,
	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
	.map_mem_usage = bpf_struct_ops_map_mem_usage,
	.map_btf_id = &bpf_struct_ops_map_btf_ids[0],
};

/* "const void *" because some subsystem is
 * passing a const (e.g. const struct tcp_congestion_ops *)
 */
bool bpf_struct_ops_get(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);

	map = __bpf_map_inc_not_zero(&st_map->map, false);
	return !IS_ERR(map);
}

void bpf_struct_ops_put(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;
	struct bpf_struct_ops_map *st_map;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);

	bpf_map_put(&st_map->map);
}
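
/* A map can back a bpf_link only if it was created with BPF_F_LINK and
 * update_elem() already moved it to READY (progs loaded, trampolines
 * written, image made read-only + exec); the actual st_ops->reg() then
 * happens in bpf_struct_ops_link_create() rather than in update_elem().
 */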

static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
		map->map_flags & BPF_F_LINK &&
		/* Pair with smp_store_release() during map_update */
		smp_load_acquire(&st_map->kvalue.state) == BPF_STRUCT_OPS_STATE_READY;
}

static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_struct_ops_map *st_map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = (struct bpf_struct_ops_map *)
		rcu_dereference_protected(st_link->map, true);
	if (st_map) {
		/* st_link->map can be NULL if
		 * bpf_struct_ops_link_create() fails to register.
		 */
		st_map->st_ops->unreg(&st_map->kvalue.data);
		bpf_map_put(&st_map->map);
	}
	kfree(st_link);
}

static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
						struct seq_file *seq)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	seq_printf(seq, "map_id:\t%d\n", map->id);
	rcu_read_unlock();
}

static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
						  struct bpf_link_info *info)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	info->struct_ops.map_id = map->id;
	rcu_read_unlock();
	return 0;
}

static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map *new_map,
					  struct bpf_map *expected_old_map)
{
	struct bpf_struct_ops_map *st_map, *old_st_map;
	struct bpf_map *old_map;
	struct bpf_struct_ops_link *st_link;
	int err;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = container_of(new_map, struct bpf_struct_ops_map, map);

	if (!bpf_struct_ops_valid_to_reg(new_map))
		return -EINVAL;

	if (!st_map->st_ops->update)
		return -EOPNOTSUPP;

	mutex_lock(&update_mutex);

	old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
	if (expected_old_map && old_map != expected_old_map) {
		err = -EPERM;
		goto err_out;
	}

	old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
	/* The new and old struct_ops must be the same type. */
	if (st_map->st_ops != old_st_map->st_ops) {
		err = -EINVAL;
		goto err_out;
	}

	err = st_map->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data);
	if (err)
		goto err_out;

	bpf_map_inc(new_map);
	rcu_assign_pointer(st_link->map, new_map);
	bpf_map_put(old_map);

err_out:
	mutex_unlock(&update_mutex);
	return err;
}

static const struct bpf_link_ops bpf_struct_ops_map_lops = {
	.dealloc = bpf_struct_ops_map_link_dealloc,
	.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
	.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
	.update_map = bpf_struct_ops_map_link_update,
};

int bpf_struct_ops_link_create(union bpf_attr *attr)
{
	struct bpf_struct_ops_link *link = NULL;
	struct bpf_link_primer link_primer;
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;
	int err;

	map = bpf_map_get(attr->link_create.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	st_map = (struct bpf_struct_ops_map *)map;

	if (!bpf_struct_ops_valid_to_reg(map)) {
		err = -EINVAL;
		goto err_out;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto err_out;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto err_out;

	err = st_map->st_ops->reg(st_map->kvalue.data);
	if (err) {
		bpf_link_cleanup(&link_primer);
		link = NULL;
		goto err_out;
	}
	RCU_INIT_POINTER(link->map, map);

	return bpf_link_settle(&link_primer);

err_out:
	bpf_map_put(map);
	kfree(link);
	return err;
}
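
/* Typical BPF_F_LINK flow from userspace (illustrative; libbpf wraps
 * these raw bpf(2) commands):
 *
 *   1. BPF_MAP_CREATE       map_type BPF_MAP_TYPE_STRUCT_OPS, BPF_F_LINK,
 *                           btf_vmlinux_value_type_id of the value struct.
 *   2. BPF_MAP_UPDATE_ELEM  key 0, value holding prog fds: trampolines
 *                           are built and the map becomes READY.
 *   3. BPF_LINK_CREATE      link_create.map_fd: st_ops->reg() runs here.
 *   4. BPF_LINK_UPDATE      optionally swaps in another READY map of the
 *                           same st_ops via ->update().
 */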