// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

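/* A hedged sketch of how callers are expected to consult the static keys
 * above (the cgroup_bpf_enabled() wrapper lives in <linux/bpf-cgroup.h>;
 * shown here for illustration only):
 *
 *	// in a hot path, skip all cgroup-BPF work unless at least one
 *	// program of this attach type is attached anywhere in the system
 *	if (cgroup_bpf_enabled(CGROUP_INET_INGRESS))
 *		ret = __cgroup_bpf_run_filter_skb(sk, skb,
 *						  CGROUP_INET_INGRESS);
 */
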
void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_cgroup_storage *new_storages[],
				     enum bpf_attach_type type,
				     struct bpf_prog *prog,
				     struct cgroup *cgrp)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_cgroup_storage_key key;
	struct bpf_map *map;

	key.cgroup_inode_id = cgroup_id(cgrp);
	key.attach_type = type;

	for_each_cgroup_storage_type(stype) {
		map = prog->aux->cgroup_storage[stype];
		if (!map)
			continue;

		storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
		if (storages[stype])
			continue;

		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			bpf_cgroup_storages_free(new_storages);
			return -ENOMEM;
		}

		new_storages[stype] = storages[stype];
	}

	return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}

/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	struct list_head *storages = &cgrp->bpf.storages;
	struct bpf_cgroup_storage *storage, *stmp;
	unsigned int atype;

	mutex_lock(&cgroup_mutex);

	for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
		struct list_head *progs = &cgrp->bpf.progs[atype];
		struct bpf_prog_list *pl, *pltmp;

		list_for_each_entry_safe(pl, pltmp, progs, node) {
			list_del(&pl->node);
			if (pl->prog)
				bpf_prog_put(pl->prog);
			if (pl->link)
				bpf_cgroup_link_auto_detach(pl->link);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key[atype]);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	list_for_each_entry_safe(storage, stmp, storages, list_cg) {
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
 * link or direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		cnt++;
	}
	return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum cgroup_bpf_attach_type atype)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[atype];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[atype]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}

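/* Illustrative summary (not in the original source) of the rule above for
 * a parent cgroup P and a child C:
 *
 *	P has a prog attached with no flags      -> attach in C is denied
 *	P has a prog with BPF_F_ALLOW_OVERRIDE   -> C may attach one
 *	                                            overriding prog
 *	P has progs with BPF_F_ALLOW_MULTI       -> C may attach freely
 */
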
/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[atype]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[atype], node) {
			if (!prog_list_prog(pl))
				continue;

			item = &progs->items[cnt];
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}

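/* Worked example of the two loops above (illustrative only). For a
 * hierarchy root -> mid -> leaf, where root holds [R1, R2] with
 * BPF_F_ALLOW_MULTI, mid holds [M1] with BPF_F_ALLOW_MULTI and leaf
 * holds [L1] with no flags, the effective array computed for leaf is:
 *
 *	[L1, M1, R1, R2]
 *
 * The walk starts at the cgroup itself (cnt == 0, so its own programs
 * always count) and keeps appending ancestor programs only while the
 * ancestor's BPF_F_ALLOW_MULTI permits it.
 */
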
static void activate_effective_progs(struct cgroup *cgrp,
				     enum cgroup_bpf_attach_type atype,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[atype], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
	/* has to use macro instead of const int, since compiler thinks
	 * that array below is variable length
	 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	INIT_LIST_HEAD(&cgrp->bpf.storages);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}

static int update_effective_progs(struct cgroup *cgrp,
				  enum cgroup_bpf_attach_type atype)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, atype, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, atype, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (list_empty(progs))
			return NULL;
		return list_first_entry(progs, typeof(*pl), node);
	}

	list_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog && prog != replace_prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}

/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @link: A link to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_attach(struct cgroup *cgrp,
			       struct bpf_prog *prog, struct bpf_prog *replace_prog,
			       struct bpf_cgroup_link *link,
			       enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_list *pl;
	struct list_head *progs;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	atype = to_cgroup_bpf_attach_type(type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];

	if (!hierarchy_allows_attach(cgrp, atype))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, new_storage, type,
				      prog ? : link->link.prog, cgrp))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
	} else {
		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storages_free(new_storage);
			return -ENOMEM;
		}
		list_add_tail(&pl->node, progs);
	}

	pl->prog = prog;
	pl->link = link;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[atype] = saved_flags;

	err = update_effective_progs(cgrp, atype);
	if (err)
		goto cleanup;

	if (old_prog)
		bpf_prog_put(old_prog);
	else
		static_branch_inc(&cgroup_bpf_enabled_key[atype]);
	bpf_cgroup_storages_link(new_storage, cgrp, type);
	return 0;

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(new_storage);
	if (!old_prog) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}

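/* Summary of the attach-flag combinations validated above (illustrative,
 * derived from the checks in __cgroup_bpf_attach()):
 *
 *	(no flags)                           one prog per cgroup, children
 *	                                     may not override it
 *	BPF_F_ALLOW_OVERRIDE                 one prog, descendants may
 *	                                     attach an overriding prog
 *	BPF_F_ALLOW_MULTI                    many progs, all run in sequence
 *	BPF_F_ALLOW_MULTI | BPF_F_REPLACE    replace @replace_prog in place
 *	BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI   rejected with -EINVAL
 *	BPF_F_REPLACE without BPF_F_ALLOW_MULTI    rejected with -EINVAL
 */
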
static int cgroup_bpf_attach(struct cgroup *cgrp,
			     struct bpf_prog *prog, struct bpf_prog *replace_prog,
			     struct bpf_cgroup_link *link,
			     enum bpf_attach_type type,
			     u32 flags)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct list_head *head;
	struct cgroup *cg;
	int pos;

	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[atype];
			list_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
	}
}

/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: The program to replace the link's current program with
 *
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_replace(struct cgroup *cgrp,
				struct bpf_cgroup_link *link,
				struct bpf_prog *new_prog)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct list_head *progs;
	bool found = false;

	atype = to_cgroup_bpf_attach_type(link->type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];

	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

	list_for_each_entry(pl, progs, node) {
		if (pl->link == link) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENOENT;

	old_prog = xchg(&link->link.prog, new_prog);
	replace_effective_prog(cgrp, atype, link);
	bpf_prog_put(old_prog);
	return 0;
}

static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
			      struct bpf_prog *old_prog)
{
	struct bpf_cgroup_link *cg_link;
	int ret;

	cg_link = container_of(link, struct bpf_cgroup_link, link);

	mutex_lock(&cgroup_mutex);
	/* link might have been auto-released by dying cgroup, so fail */
	if (!cg_link->cgroup) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	if (old_prog && link->prog != old_prog) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL) in legacy mode
		 */
		return list_first_entry(progs, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach MULTI prog the user has to specify valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	list_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			       struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct list_head *progs;
	u32 flags;
	int err;

	atype = to_cgroup_bpf_attach_type(type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];
	flags = cgrp->bpf.flags[atype];

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	err = update_effective_progs(cgrp, atype);
	if (err)
		goto cleanup;

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[atype] = 0;
	if (old_prog)
		bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key[atype]);
	return 0;

cleanup:
	/* restore back prog or link */
	pl->prog = old_prog;
	pl->link = link;
	return err;
}

static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			     enum bpf_attach_type type)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_detach(cgrp, prog, NULL, type);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/* Must be called with cgroup_mutex held to avoid races. */
static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
			      union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_array *effective;
	struct list_head *progs;
	struct bpf_prog *prog;
	int cnt, ret = 0, i;
	u32 flags;

	atype = to_cgroup_bpf_attach_type(type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];
	flags = cgrp->bpf.flags[atype];

	effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(effective);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			prog = prog_list_prog(pl);
			id = prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}

static int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
			    union bpf_attr __user *uattr)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_query(cgrp, attr, uattr);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}

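/* Hedged userspace sketch of the legacy attach path that lands here via
 * the bpf(2) syscall. bpf_prog_attach() is libbpf's wrapper; the cgroup
 * path and fds are hypothetical:
 *
 *	int cg_fd = open("/sys/fs/cgroup/test", O_RDONLY | O_DIRECTORY);
 *	// prog_fd obtained earlier, e.g. from bpf_prog_load()
 *	bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_INGRESS,
 *			BPF_F_ALLOW_MULTI);
 */
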
int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	struct cgroup *cg;

	/* link might have been auto-detached by dying cgroup already,
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	mutex_lock(&cgroup_mutex);

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		mutex_unlock(&cgroup_mutex);
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    cg_link->type));

	cg = cg_link->cgroup;
	cg_link->cgroup = NULL;

	mutex_unlock(&cgroup_mutex);

	cgroup_put(cg);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}

static int bpf_cgroup_link_detach(struct bpf_link *link)
{
	bpf_cgroup_link_release(link);

	return 0;
}

static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
					struct seq_file *seq)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	seq_printf(seq,
		   "cgroup_id:\t%llu\n"
		   "attach_type:\t%d\n",
		   cg_id,
		   cg_link->type);
}

static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
					  struct bpf_link_info *info)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	info->cgroup.cgroup_id = cg_id;
	info->cgroup.attach_type = cg_link->type;
	return 0;
}

static const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
	.detach = bpf_cgroup_link_detach,
	.update_prog = cgroup_bpf_replace,
	.show_fdinfo = bpf_cgroup_link_show_fdinfo,
	.fill_link_info = bpf_cgroup_link_fill_link_info,
};

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_cgroup_link *link;
	struct cgroup *cgrp;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}

	bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
		      prog);
	link->cgroup = cgrp;
	link->type = attr->link_create.attach_type;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_cgroup;
	}

	err = cgroup_bpf_attach(cgrp, NULL, NULL, link,
				link->type, BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_cgroup;
	}

	return bpf_link_settle(&link_primer);

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}

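/* Hedged userspace sketch of the link-based attach path above.
 * bpf_link_create() is libbpf's wrapper for BPF_LINK_CREATE; the cgroup
 * path and fds are hypothetical:
 *
 *	int cg_fd = open("/sys/fs/cgroup/test", O_RDONLY | O_DIRECTORY);
 *	int link_fd = bpf_link_create(prog_fd, cg_fd,
 *				      BPF_CGROUP_INET_EGRESS, NULL);
 *	// the attachment stays alive until link_fd (and any pins of the
 *	// link) go away; a dying cgroup auto-detaches it
 */
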
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS    (0)	- continue with packet output
 *   NET_XMIT_DROP       (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN         (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -EPERM			- drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (atype == CGROUP_INET_EGRESS) {
		ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
			cgrp->bpf.effective[atype], skb, __bpf_prog_run_save_cb);
	} else {
		ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], skb,
					    __bpf_prog_run_save_cb);
		ret = (ret == 1 ? 0 : -EPERM);
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);

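/* Minimal BPF-side sketch of a program this runner executes (illustrative
 * only; SEC() name follows libbpf conventions). Per the return convention
 * documented above, 1 allows the packet and 0 drops it (-EPERM to the
 * sender on egress):
 *
 *	SEC("cgroup_skb/egress")
 *	int allow_all(struct __sk_buff *skb)
 *	{
 *		return 1;	// 1 = allow, 0 = drop
 *	}
 */
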
/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk, bpf_prog_run);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       the sockaddr provided by the user
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 * @flags: Pointer to u32 which contains higher bits of BPF program
 *         return value (OR'ed together).
 *
 * socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[atype], &ctx,
					  bpf_prog_run, flags);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sock_ops,
				    bpf_prog_run);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx,
				      bpf_prog_run);
	rcu_read_unlock();

	return !allow;
}

static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer (in and out)
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of @new_buf if program set new value, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	loff_t pos = 0;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (!ctx.cur_val ||
	    table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && *buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (ctx.new_val) {
			memcpy(ctx.new_val, *buf, ctx.new_len);
		} else {
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
		}
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx, bpf_prog_run);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		kfree(*buf);
		*buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}

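/* Minimal BPF-side sketch for this hook (illustrative; it uses the
 * bpf_sysctl_get_name() helper defined later in this file). Returning 0
 * rejects the access with -EPERM, returning 1 allows it:
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_guard(struct bpf_sysctl *ctx)
 *	{
 *		char name[64];
 *
 *		bpf_sysctl_get_name(ctx, name, sizeof(name), 0);
 *		if (ctx->write)
 *			return 0;	// deny all sysctl writes
 *		return 1;
 *	}
 */
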
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
					     enum cgroup_bpf_attach_type attach_type)
{
	struct bpf_prog_array *prog_array;
	bool empty;

	rcu_read_lock();
	prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
	empty = bpf_prog_array_is_empty(prog_array);
	rcu_read_unlock();

	return empty;
}

static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
			     struct bpf_sockopt_buf *buf)
{
	if (unlikely(max_optlen < 0))
		return -EINVAL;

	if (unlikely(max_optlen > PAGE_SIZE)) {
		/* We don't expose optvals that are greater than PAGE_SIZE
		 * to the BPF program.
		 */
		max_optlen = PAGE_SIZE;
	}

	if (max_optlen <= sizeof(buf->data)) {
		/* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
		 * bytes avoid the cost of kzalloc.
		 */
		ctx->optval = buf->data;
		ctx->optval_end = ctx->optval + max_optlen;
		return max_optlen;
	}

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return max_optlen;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
			     struct bpf_sockopt_buf *buf)
{
	if (ctx->optval == buf->data)
		return;
	kfree(ctx->optval);
}

static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
				  struct bpf_sockopt_buf *buf)
{
	return ctx->optval != buf->data;
}

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_buf buf = {};
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (__cgroup_bpf_prog_array_is_empty(cgrp, CGROUP_SETSOCKOPT))
		return 0;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);
	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
	if (max_optlen < 0)
		return max_optlen;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_SETSOCKOPT],
				    &ctx, bpf_prog_run);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;

		/* optlen == 0 from BPF indicates that we should
		 * use original userspace data.
		 */
		if (ctx.optlen != 0) {
			*optlen = ctx.optlen;
			/* We've used bpf_sockopt_kern->buf as an intermediary
			 * storage, but the BPF program indicates that we need
			 * to pass this data to the kernel setsockopt handler.
			 * No way to export on-stack buf, have to allocate a
			 * new buffer.
			 */
			if (!sockopt_buf_allocated(&ctx, &buf)) {
				void *p = kmalloc(ctx.optlen, GFP_USER);

				if (!p) {
					ret = -ENOMEM;
					goto out;
				}
				memcpy(p, ctx.optval, ctx.optlen);
				*kernel_optval = p;
			} else {
				*kernel_optval = ctx.optval;
			}
			/* export and don't free sockopt buf */
			return 0;
		}
	}

out:
	sockopt_free_buf(&ctx, &buf);
	return ret;
}

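/* Hedged BPF-side sketch matching the canonical use case mentioned above:
 * rewriting TCP_CONGESTION before the kernel handler runs. The bounds
 * check against ctx->optval_end is required by the verifier; setting
 * ctx->optlen = -1 instead would bypass the kernel handler entirely:
 *
 *	SEC("cgroup/setsockopt")
 *	int override_cc(struct bpf_sockopt *ctx)
 *	{
 *		char cubic[] = "cubic";
 *
 *		if (ctx->level == SOL_TCP && ctx->optname == TCP_CONGESTION &&
 *		    ctx->optval + sizeof(cubic) <= ctx->optval_end) {
 *			__builtin_memcpy(ctx->optval, cubic, sizeof(cubic));
 *			ctx->optlen = sizeof(cubic);
 *		}
 *		return 1;	// 1 = proceed, 0 = reject with -EPERM
 *	}
 */
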
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_buf buf = {};
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.retval = retval,
	};
	int ret;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (__cgroup_bpf_prog_array_is_empty(cgrp, CGROUP_GETSOCKOPT))
		return retval;

	ctx.optlen = max_optlen;

	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
	if (max_optlen < 0)
		return max_optlen;

	if (!retval) {
		/* If kernel getsockopt finished successfully,
		 * copy whatever was returned to the user back
		 * into our temporary buffer. Set optlen to the
		 * one that kernel returned as well to let
		 * BPF programs inspect the value.
		 */
		if (get_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}

		if (ctx.optlen < 0) {
			ret = -EFAULT;
			goto out;
		}

		if (copy_from_user(ctx.optval, optval,
				   min(ctx.optlen, max_optlen)) != 0) {
			ret = -EFAULT;
			goto out;
		}
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
				    &ctx, bpf_prog_run);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen > max_optlen || ctx.optlen < 0) {
		ret = -EFAULT;
		goto out;
	}

	/* BPF programs only allowed to set retval to 0, not some
	 * arbitrary value.
	 */
	if (ctx.retval != 0 && ctx.retval != retval) {
		ret = -EFAULT;
		goto out;
	}

	if (ctx.optlen != 0) {
		if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
		    put_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}
	}

	ret = ctx.retval;

out:
	sockopt_free_buf(&ctx, &buf);
	return ret;
}

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.retval = retval,
		.optlen = *optlen,
		.optval = optval,
		.optval_end = optval + *optlen,
	};
	int ret;

	/* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy
	 * user data back into BPF buffer when retval != 0. This is
	 * done as an optimization to avoid extra copy, assuming
	 * kernel won't populate the data in case of an error.
	 * Here we always pass the data and memset() should
	 * be called if that data shouldn't be "exported".
	 */

	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
				    &ctx, bpf_prog_run);
	if (!ret)
		return -EPERM;

	if (ctx.optlen > *optlen)
		return -EFAULT;

	/* BPF programs only allowed to set retval to 0, not some
	 * arbitrary value.
	 */
	if (ctx.retval != 0 && ctx.retval != retval)
		return -EFAULT;

	/* BPF programs can shrink the buffer, export the modifications.
	 */
	if (ctx.optlen != 0)
		*optlen = ctx.optlen;

	return ctx.retval;
}

static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;

	/* Avoid leading slash. */
	if (!ret)
		return tmp_ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return 0;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}

BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	u32 read_size;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       sizeof_field(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also for stores additional temporary
		 * register is used since neither src_reg nor dst_reg can be
		 * overridden.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg,
				bpf_ctx_narrow_access_offset(
					0, sizeof(u32), sizeof(loff_t)));
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
				bpf_ctx_narrow_access_offset(
					0, read_size, sizeof(loff_t)));
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};

#ifdef CONFIG_NET
BPF_CALL_1(bpf_get_netns_cookie_sockopt, struct bpf_sockopt_kern *, ctx)
{
	const struct net *net = ctx ? sock_net(ctx->sk) : &init_net;

	return net->net_cookie;
}

static const struct bpf_func_proto bpf_get_netns_cookie_sockopt_proto = {
	.func		= bpf_get_netns_cookie_sockopt,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX_OR_NULL,
};
#endif

static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_get_netns_cookie:
		return &bpf_get_netns_cookie_sockopt_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_setsockopt:
		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
			return &bpf_sk_setsockopt_proto;
		return NULL;
	case BPF_FUNC_getsockopt:
		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
			return &bpf_sk_getsockopt_proto;
		return NULL;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			fallthrough;
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}

	return true;
}

#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
	  si->dst_reg, si->src_reg,					\
	  offsetof(struct bpf_sockopt_kern, F))

static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
		break;
	}

	return insn - insn_buf;
}

static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
				   bool direct_write,
				   const struct bpf_prog *prog)
{
	/* Nothing to do for sockopt argument. The data is kzalloc'ated.
	 */
	return 0;
}

const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
	.get_func_proto		= cg_sockopt_func_proto,
	.is_valid_access	= cg_sockopt_is_valid_access,
	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
	.gen_prologue		= cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};