// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);
/* __always_inline is necessary to prevent indirect call through run_prog
 * pointer.
 */
static __always_inline int
bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
		      enum cgroup_bpf_attach_type atype,
		      const void *ctx, bpf_prog_run_fn run_prog,
		      int retval, u32 *ret_flags)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;
	u32 func_ret;

	run_ctx.retval = retval;
	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(cgrp->effective[atype]);
	item = &array->items[0];
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		func_ret = run_prog(prog, ctx);
		if (ret_flags) {
			*(ret_flags) |= (func_ret >> 1);
			func_ret &= 1;
		}
		if (!func_ret && !IS_ERR_VALUE((long)run_ctx.retval))
			run_ctx.retval = -EPERM;
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();
	return run_ctx.retval;
}
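/*
 * Illustrative only: a typical caller (e.g. the skb filter further below)
 * invokes the array runner roughly like this, with @run_prog selecting how
 * the context is handed to each program and @retval seeding the value that
 * bpf_set_retval()/bpf_get_retval() operate on:
 *
 *	u32 flags = 0;
 *	int ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb,
 *					__bpf_prog_run_save_cb, 0, &flags);
 *
 * A zero program return forces run_ctx.retval to -EPERM unless a program
 * already stored an error there via bpf_set_retval().
 */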
void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}
static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}
static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_cgroup_storage *new_storages[],
				     enum bpf_attach_type type,
				     struct bpf_prog *prog,
				     struct cgroup *cgrp)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_cgroup_storage_key key;
	struct bpf_map *map;

	key.cgroup_inode_id = cgroup_id(cgrp);
	key.attach_type = type;

	for_each_cgroup_storage_type(stype) {
		map = prog->aux->cgroup_storage[stype];
		if (!map)
			continue;

		storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
		if (storages[stype])
			continue;

		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			bpf_cgroup_storages_free(new_storages);
			return -ENOMEM;
		}

		new_storages[stype] = storages[stype];
	}

	return 0;
}
static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}
static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}
/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}
/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	struct list_head *storages = &cgrp->bpf.storages;
	struct bpf_cgroup_storage *storage, *stmp;
	unsigned int atype;

	mutex_lock(&cgroup_mutex);

	for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
		struct list_head *progs = &cgrp->bpf.progs[atype];
		struct bpf_prog_list *pl, *pltmp;

		list_for_each_entry_safe(pl, pltmp, progs, node) {
			list_del(&pl->node);
			if (pl->prog)
				bpf_prog_put(pl->prog);
			if (pl->link)
				bpf_cgroup_link_auto_detach(pl->link);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key[atype]);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	list_for_each_entry_safe(storage, stmp, storages, list_cg) {
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}
/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}
/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
 * link or direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}
/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		cnt++;
	}
	return cnt;
}
/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendent cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum cgroup_bpf_attach_type atype)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[atype];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[atype]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}
/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[atype]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[atype], node) {
			if (!prog_list_prog(pl))
				continue;

			item = &progs->items[cnt];
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}
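/*
 * Illustrative only: for a path root -> A -> B where both A and B attached
 * programs with BPF_F_ALLOW_MULTI, B's effective array is populated starting
 * from B itself and walking toward the root, i.e. { B's progs..., A's
 * progs... }, so bpf_prog_run_array_cg() executes the cgroup's own programs
 * before those inherited from its ancestors.
 */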
static void activate_effective_progs(struct cgroup *cgrp,
				     enum cgroup_bpf_attach_type atype,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[atype], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}
/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	INIT_LIST_HEAD(&cgrp->bpf.storages);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}
static int update_effective_progs(struct cgroup *cgrp,
				  enum cgroup_bpf_attach_type atype)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, atype, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, atype, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}
#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (list_empty(progs))
			return NULL;
		return list_first_entry(progs, typeof(*pl), node);
	}

	list_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog && prog != replace_prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}
/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @link: A link to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_attach(struct cgroup *cgrp,
			       struct bpf_prog *prog, struct bpf_prog *replace_prog,
			       struct bpf_cgroup_link *link,
			       enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_list *pl;
	struct list_head *progs;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	atype = to_cgroup_bpf_attach_type(type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];

	if (!hierarchy_allows_attach(cgrp, atype))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, new_storage, type,
				      prog ? : link->link.prog, cgrp))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
	} else {
		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storages_free(new_storage);
			return -ENOMEM;
		}
		list_add_tail(&pl->node, progs);
	}

	pl->prog = prog;
	pl->link = link;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[atype] = saved_flags;

	err = update_effective_progs(cgrp, atype);
	if (err)
		goto cleanup;

	if (old_prog)
		bpf_prog_put(old_prog);
	else
		static_branch_inc(&cgroup_bpf_enabled_key[atype]);
	bpf_cgroup_storages_link(new_storage, cgrp, type);
	return 0;

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(new_storage);
	if (!old_prog) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}
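/*
 * Illustrative only: from userspace this path is typically reached via the
 * BPF_PROG_ATTACH command of the bpf() syscall, e.g. (sketch, error handling
 * elided):
 *
 *	union bpf_attr attr = {
 *		.target_fd	= cgroup_fd,
 *		.attach_bpf_fd	= prog_fd,
 *		.attach_type	= BPF_CGROUP_INET_INGRESS,
 *		.attach_flags	= BPF_F_ALLOW_MULTI,
 *	};
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */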
static int cgroup_bpf_attach(struct cgroup *cgrp,
			     struct bpf_prog *prog, struct bpf_prog *replace_prog,
			     struct bpf_cgroup_link *link,
			     enum bpf_attach_type type,
			     u32 flags)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct list_head *head;
	struct cgroup *cg;
	int pos;

	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[atype];
			list_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
	}
}
/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: Updated BPF program to use for the link
 *
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_replace(struct cgroup *cgrp,
				struct bpf_cgroup_link *link,
				struct bpf_prog *new_prog)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct list_head *progs;
	bool found = false;

	atype = to_cgroup_bpf_attach_type(link->type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];

	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

	list_for_each_entry(pl, progs, node) {
		if (pl->link == link) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENOENT;

	old_prog = xchg(&link->link.prog, new_prog);
	replace_effective_prog(cgrp, atype, link);
	bpf_prog_put(old_prog);
	return 0;
}
static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
			      struct bpf_prog *old_prog)
{
	struct bpf_cgroup_link *cg_link;
	int ret;

	cg_link = container_of(link, struct bpf_cgroup_link, link);

	mutex_lock(&cgroup_mutex);
	/* link might have been auto-released by dying cgroup, so fail */
	if (!cg_link->cgroup) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	if (old_prog && link->prog != old_prog) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}
static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL) in legacy mode
		 */
		return list_first_entry(progs, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach MULTI prog the user has to specify valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	list_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}
/**
 * purge_effective_progs() - After compute_effective_progs fails to alloc new
 *                           cgrp->bpf.inactive table we can recover by
 *                           recomputing the array in place.
 *
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @atype: Type of detach operation
 */
static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
				  struct bpf_cgroup_link *link,
				  enum cgroup_bpf_attach_type atype)
{
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct list_head *head;
	struct cgroup *cg;
	int pos;

	/* recompute effective prog array in place */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link or prog in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[atype];
			list_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->prog == prog && pl->link == link)
					goto found;
				pos++;
			}
		}

		/* no link or prog match, skip the cgroup of this layer */
		continue;
found:
		progs = rcu_dereference_protected(
				desc->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));

		/* Remove the program from the array */
		WARN_ONCE(bpf_prog_array_delete_safe_at(progs, pos),
			  "Failed to purge a prog from array at index %d", pos);
	}
}
/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			       struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct list_head *progs;
	u32 flags;

	atype = to_cgroup_bpf_attach_type(type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];
	flags = cgrp->bpf.flags[atype];

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	if (update_effective_progs(cgrp, atype)) {
		/* if update effective array failed replace the prog with a dummy prog*/
		pl->prog = old_prog;
		pl->link = link;
		purge_effective_progs(cgrp, old_prog, link, atype);
	}

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[atype] = 0;
	if (old_prog)
		bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key[atype]);
	return 0;
}
static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			     enum bpf_attach_type type)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_detach(cgrp, prog, NULL, type);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
/* Must be called with cgroup_mutex held to avoid races. */
static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
			      union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_array *effective;
	struct list_head *progs;
	struct bpf_prog *prog;
	int cnt, ret = 0, i;
	u32 flags;

	atype = to_cgroup_bpf_attach_type(type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];
	flags = cgrp->bpf.flags[atype];

	effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(effective);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			prog = prog_list_prog(pl);
			id = prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}
static int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
			    union bpf_attr __user *uattr)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_query(cgrp, attr, uattr);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}
int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}
static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	struct cgroup *cg;

	/* link might have been auto-detached by dying cgroup already,
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	mutex_lock(&cgroup_mutex);

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		mutex_unlock(&cgroup_mutex);
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    cg_link->type));

	cg = cg_link->cgroup;
	cg_link->cgroup = NULL;

	mutex_unlock(&cgroup_mutex);

	cgroup_put(cg);
}
static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}

static int bpf_cgroup_link_detach(struct bpf_link *link)
{
	bpf_cgroup_link_release(link);

	return 0;
}
static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
					struct seq_file *seq)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	seq_printf(seq,
		   "cgroup_id:\t%llu\n"
		   "attach_type:\t%d\n",
		   cg_id,
		   cg_link->type);
}
static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
					  struct bpf_link_info *info)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	info->cgroup.cgroup_id = cg_id;
	info->cgroup.attach_type = cg_link->type;
	return 0;
}
static const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
	.detach = bpf_cgroup_link_detach,
	.update_prog = cgroup_bpf_replace,
	.show_fdinfo = bpf_cgroup_link_show_fdinfo,
	.fill_link_info = bpf_cgroup_link_fill_link_info,
};
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_cgroup_link *link;
	struct cgroup *cgrp;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}

	bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
		      prog);
	link->cgroup = cgrp;
	link->type = attr->link_create.attach_type;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_cgroup;
	}

	err = cgroup_bpf_attach(cgrp, NULL, NULL, link,
				link->type, BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_cgroup;
	}

	return bpf_link_settle(&link_primer);

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}
/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @atype: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @atype must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS    (0)	- continue with packet output
 *   NET_XMIT_DROP       (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN         (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -err			- drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (atype == CGROUP_INET_EGRESS) {
		u32 flags = 0;
		bool cn;

		ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb,
					    __bpf_prog_run_save_cb, 0, &flags);

		/* Return values of CGROUP EGRESS BPF programs are:
		 *   0: drop packet
		 *   1: keep packet
		 *   2: drop packet and cn
		 *   3: keep packet and cn
		 *
		 * The returned value is then converted to one of the NET_XMIT
		 * or an error code that is then interpreted as drop packet
		 * (and no cn):
		 *   0: NET_XMIT_SUCCESS  skb should be transmitted
		 *   1: NET_XMIT_DROP     skb should be dropped and cn
		 *   2: NET_XMIT_CN       skb should be transmitted and cn
		 *   3: -err              skb should be dropped
		 */

		cn = flags & BPF_RET_SET_CN;
		if (ret && !IS_ERR_VALUE((long)ret))
			ret = -EFAULT;
		if (!ret)
			ret = (cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);
		else
			ret = (cn ? NET_XMIT_DROP : ret);
	} else {
		ret = bpf_prog_run_array_cg(&cgrp->bpf, atype,
					    skb, __bpf_prog_run_save_cb, 0,
					    NULL);
		if (ret && !IS_ERR_VALUE((long)ret))
			ret = -EFAULT;
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
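/*
 * Illustrative only: a minimal egress program and how its verdict maps to the
 * values documented above (a BPF-side sketch, assuming the usual libbpf SEC()
 * conventions):
 *
 *	SEC("cgroup_skb/egress")
 *	int egress_allow_all(struct __sk_buff *skb)
 *	{
 *		return 1;	// keep packet -> NET_XMIT_SUCCESS
 *	}
 *
 * Returning 3 (keep + congestion notification) would instead surface here as
 * NET_XMIT_CN via the BPF_RET_SET_CN flag bit.
 */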
/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @atype: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @atype must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);

	return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0,
				     NULL);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and the
 *                                       sockaddr provided by userspace
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @atype: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 * @flags: Pointer to u32 which contains higher bits of BPF program
 *         return value (OR'ed together).
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	return bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run,
				     0, flags);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @atype: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @atype must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);

	return bpf_prog_run_array_cg(&cgrp->bpf, atype, sock_ops, bpf_prog_run,
				     0, NULL);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int ret;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
				    NULL);
	rcu_read_unlock();

	return ret;
}
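/*
 * Illustrative only: a BPF_PROG_TYPE_CGROUP_DEVICE program sees the fields
 * packed above, e.g. (sketch, assuming libbpf SEC() conventions):
 *
 *	SEC("cgroup/dev")
 *	int allow_null_dev(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		short type = ctx->access_type & 0xFFFF;
 *
 *		return type == BPF_DEVCG_DEV_CHAR &&
 *		       ctx->major == 1 && ctx->minor == 3;	// only /dev/null
 *	}
 */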
BPF_CALL_0(bpf_get_retval)
{
	struct bpf_cg_run_ctx *ctx =
		container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);

	return ctx->retval;
}

static const struct bpf_func_proto bpf_get_retval_proto = {
	.func		= bpf_get_retval,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
BPF_CALL_1(bpf_set_retval, int, retval)
{
	struct bpf_cg_run_ctx *ctx =
		container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);

	ctx->retval = retval;
	return 0;
}

static const struct bpf_func_proto bpf_set_retval_proto = {
	.func		= bpf_set_retval,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
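/*
 * Illustrative only: a cgroup hook program can combine these helpers to
 * propagate a custom errno instead of the default -EPERM (sketch):
 *
 *	bpf_set_retval(-EOPNOTSUPP);
 *	return 0;	// reject; caller sees -EOPNOTSUPP, not -EPERM
 *
 * This works because bpf_prog_run_array_cg() only overwrites run_ctx.retval
 * with -EPERM when it does not already hold an error value.
 */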
static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	case BPF_FUNC_get_retval:
		return &bpf_get_retval_proto;
	case BPF_FUNC_set_retval:
		return &bpf_set_retval_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}
static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}
const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};
/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer (in and out)
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of the new value if the program set one, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @atype: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	loff_t pos = 0;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (!ctx.cur_val ||
	    table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && *buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (ctx.new_val) {
			memcpy(ctx.new_val, *buf, ctx.new_len);
		} else {
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
		}
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
				    NULL);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		kfree(*buf);
		*buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret;
}
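/*
 * Illustrative only: a BPF_PROG_TYPE_CGROUP_SYSCTL program driving this hook
 * (sketch, assuming libbpf SEC() conventions):
 *
 *	SEC("cgroup/sysctl")
 *	int forbid_writes(struct bpf_sysctl *ctx)
 *	{
 *		return ctx->write ? 0 : 1;	// deny writes, allow reads
 *	}
 */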
#ifdef CONFIG_NET
static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
			     struct bpf_sockopt_buf *buf)
{
	if (unlikely(max_optlen < 0))
		return -EINVAL;

	if (unlikely(max_optlen > PAGE_SIZE)) {
		/* We don't expose optvals that are greater than PAGE_SIZE
		 * to the BPF program.
		 */
		max_optlen = PAGE_SIZE;
	}

	if (max_optlen <= sizeof(buf->data)) {
		/* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
		 * bytes avoid the cost of kzalloc.
		 */
		ctx->optval = buf->data;
		ctx->optval_end = ctx->optval + max_optlen;
		return max_optlen;
	}

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return max_optlen;
}
static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
			     struct bpf_sockopt_buf *buf)
{
	if (ctx->optval == buf->data)
		return;
	kfree(ctx->optval);
}

static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
				  struct bpf_sockopt_buf *buf)
{
	return ctx->optval != buf->data;
}
int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_buf buf = {};
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);
	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
	if (max_optlen < 0)
		return max_optlen;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_SETSOCKOPT,
				    &ctx, bpf_prog_run, 0, NULL);
	release_sock(sk);

	if (ret)
		goto out;

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;

		/* optlen == 0 from BPF indicates that we should
		 * use original userspace data.
		 */
		if (ctx.optlen != 0) {
			*optlen = ctx.optlen;
			/* We've used bpf_sockopt_kern->buf as an intermediary
			 * storage, but the BPF program indicates that we need
			 * to pass this data to the kernel setsockopt handler.
			 * No way to export on-stack buf, have to allocate a
			 * new buffer.
			 */
			if (!sockopt_buf_allocated(&ctx, &buf)) {
				void *p = kmalloc(ctx.optlen, GFP_USER);

				if (!p) {
					ret = -ENOMEM;
					goto out;
				}
				memcpy(p, ctx.optval, ctx.optlen);
				*kernel_optval = p;
			} else {
				*kernel_optval = ctx.optval;
			}
			/* export and don't free sockopt buf */
			return 0;
		}
	}

out:
	sockopt_free_buf(&ctx, &buf);
	return ret;
}
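/*
 * Illustrative only: the optlen contract above, seen from the BPF side
 * (sketch). Returning with ctx->optlen == -1 lets the value bypass the kernel
 * handler entirely, while leaving optlen at 0 keeps the original user buffer:
 *
 *	SEC("cgroup/setsockopt")
 *	int setsockopt_passthrough(struct bpf_sockopt *ctx)
 *	{
 *		ctx->optlen = 0;	// keep original userspace data
 *		return 1;		// and run the kernel handler
 *	}
 */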
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_buf buf = {};
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.current_task = current,
	};
	int ret;

	ctx.optlen = max_optlen;
	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
	if (max_optlen < 0)
		return max_optlen;

	if (!retval) {
		/* If kernel getsockopt finished successfully,
		 * copy whatever was returned to the user back
		 * into our temporary buffer. Set optlen to the
		 * one that kernel returned as well to let
		 * BPF programs inspect the value.
		 */
		if (get_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}

		if (ctx.optlen < 0) {
			ret = -EFAULT;
			goto out;
		}

		if (copy_from_user(ctx.optval, optval,
				   min(ctx.optlen, max_optlen)) != 0) {
			ret = -EFAULT;
			goto out;
		}
	}

	lock_sock(sk);
	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
				    &ctx, bpf_prog_run, retval, NULL);
	release_sock(sk);

	if (ret < 0)
		goto out;

	if (ctx.optlen > max_optlen || ctx.optlen < 0) {
		ret = -EFAULT;
		goto out;
	}

	if (ctx.optlen != 0) {
		if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
		    put_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}
	}

out:
	sockopt_free_buf(&ctx, &buf);
	return ret;
}
int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.optlen = *optlen,
		.optval = optval,
		.optval_end = optval + *optlen,
		.current_task = current,
	};
	int ret;

	/* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy
	 * user data back into BPF buffer when retval != 0. This is
	 * done as an optimization to avoid extra copy, assuming
	 * kernel won't populate the data in case of an error.
	 * Here we always pass the data and memset() should
	 * be called if that data shouldn't be "exported".
	 */

	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
				    &ctx, bpf_prog_run, retval, NULL);
	if (ret < 0)
		return ret;

	if (ctx.optlen > *optlen)
		return -EFAULT;

	/* BPF programs can shrink the buffer, export the modifications.
	 */
	if (ctx.optlen != 0)
		*optlen = ctx.optlen;

	return ret;
}
#endif
static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}
BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}
static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};
static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}
BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};
static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}
static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}
static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	u32 read_size;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       sizeof_field(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also for stores additional temporary
		 * register is used since neither src_reg nor dst_reg can be
		 * overridden.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg,
				bpf_ctx_narrow_access_offset(
					0, sizeof(u32), sizeof(loff_t)));
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
				bpf_ctx_narrow_access_offset(
					0, read_size, sizeof(loff_t)));
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}
const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};
#ifdef CONFIG_NET
BPF_CALL_1(bpf_get_netns_cookie_sockopt, struct bpf_sockopt_kern *, ctx)
{
	const struct net *net = ctx ? sock_net(ctx->sk) : &init_net;

	return net->net_cookie;
}

static const struct bpf_func_proto bpf_get_netns_cookie_sockopt_proto = {
	.func		= bpf_get_netns_cookie_sockopt,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX_OR_NULL,
};
#endif
static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_get_netns_cookie:
		return &bpf_get_netns_cookie_sockopt_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_setsockopt:
		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
			return &bpf_sk_setsockopt_proto;
		return NULL;
	case BPF_FUNC_getsockopt:
		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
			return &bpf_sk_getsockopt_proto;
		return NULL;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}
static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			fallthrough;
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}

	return true;
}
#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
	  si->dst_reg, si->src_reg,					\
	  offsetof(struct bpf_sockopt_kern, F))
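/*
 * Illustrative only: for si->off == offsetof(struct bpf_sockopt, optlen) and
 * a read, CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen) expands to
 *
 *	BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, optlen),
 *		    si->dst_reg, si->src_reg,
 *		    offsetof(struct bpf_sockopt_kern, optlen))
 *
 * i.e. context accesses are rewritten into loads/stores against the
 * kernel-side struct bpf_sockopt_kern.
 */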
static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		BUILD_BUG_ON(offsetof(struct bpf_cg_run_ctx, run_ctx) != 0);

		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, treg,
					      offsetof(struct bpf_sockopt_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
					      treg, si->dst_reg,
					      offsetof(struct bpf_sockopt_kern, current_task));
			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
					      treg, treg,
					      offsetof(struct task_struct, bpf_ctx));
			*insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
					      treg, si->src_reg,
					      offsetof(struct bpf_cg_run_ctx, retval));
			*insn++ = BPF_LDX_MEM(BPF_DW, treg, si->dst_reg,
					      offsetof(struct bpf_sockopt_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
					      si->dst_reg, si->src_reg,
					      offsetof(struct bpf_sockopt_kern, current_task));
			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
					      si->dst_reg, si->dst_reg,
					      offsetof(struct task_struct, bpf_ctx));
			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
					      si->dst_reg, si->dst_reg,
					      offsetof(struct bpf_cg_run_ctx, retval));
		}
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
		break;
	}

	return insn - insn_buf;
}
static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
				   bool direct_write,
				   const struct bpf_prog *prog)
{
	/* Nothing to do for sockopt argument. The data is kzalloc'ated.
	 */
	return 0;
}
const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
	.get_func_proto		= cg_sockopt_func_proto,
	.is_valid_access	= cg_sockopt_is_valid_access,
	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
	.gen_prologue		= cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};