/* SPDX-License-Identifier: GPL-2.0 */
/* include/linux/bpf-cgroup.h (GNU Linux-libre 5.10.217-gnu1) */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

#define BPF_CGROUP_STORAGE_NEST_MAX     8

struct bpf_cgroup_storage_info {
        struct task_struct *task;
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

/* For each cpu, permit a maximum of BPF_CGROUP_STORAGE_NEST_MAX tasks
 * to use bpf cgroup storage simultaneously.
 */
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
                bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);

#define for_each_cgroup_storage_type(stype) \
        for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
        struct rcu_head rcu;
        char data[];
};

struct bpf_cgroup_storage {
        union {
                struct bpf_storage_buffer *buf;
                void __percpu *percpu_buf;
        };
        struct bpf_cgroup_storage_map *map;
        struct bpf_cgroup_storage_key key;
        struct list_head list_map;
        struct list_head list_cg;
        struct rb_node node;
        struct rcu_head rcu;
};

struct bpf_cgroup_link {
        struct bpf_link link;
        struct cgroup *cgroup;
        enum bpf_attach_type type;
};

struct bpf_prog_list {
        struct list_head node;
        struct bpf_prog *prog;
        struct bpf_cgroup_link *link;
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
        /* array of effective progs in this cgroup */
        struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

        /* progs attached to this cgroup and their attach flags;
         * when flags is 0 or BPF_F_ALLOW_OVERRIDE, the progs list will
         * have either zero or one element;
         * when BPF_F_ALLOW_MULTI is set, the list can have up to
         * BPF_CGROUP_MAX_PROGS entries
         */
        struct list_head progs[MAX_BPF_ATTACH_TYPE];
        u32 flags[MAX_BPF_ATTACH_TYPE];

        /* list of cgroup shared storages */
        struct list_head storages;

        /* temp storage for effective prog array used by prog_attach/detach */
        struct bpf_prog_array *inactive;

        /* reference counter used to detach bpf programs after cgroup removal */
        struct percpu_ref refcnt;

        /* cgroup_bpf is released using a work queue */
        struct work_struct release_work;
};
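
/* Attach-flags usage sketch: a minimal, hypothetical userspace attach
 * with BPF_F_ALLOW_MULTI (cgroup_fd and prog_fd are assumptions, not
 * part of this header):
 *
 *      union bpf_attr attr = {};
 *
 *      attr.target_fd     = cgroup_fd;
 *      attr.attach_bpf_fd = prog_fd;
 *      attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *      attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *      err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */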

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp,
                        struct bpf_prog *prog, struct bpf_prog *replace_prog,
                        struct bpf_cgroup_link *link,
                        enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                        struct bpf_cgroup_link *link,
                        enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*(), protected by cgroup_mutex. */
int cgroup_bpf_attach(struct cgroup *cgrp,
                      struct bpf_prog *prog, struct bpf_prog *replace_prog,
                      struct bpf_cgroup_link *link, enum bpf_attach_type type,
                      u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                      enum bpf_attach_type type);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                     union bpf_attr __user *uattr);
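
/* Query sketch: a hypothetical BPF_PROG_QUERY call listing the program
 * ids attached to a cgroup (cgroup_fd and the buffer size are
 * assumptions):
 *
 *      __u32 ids[64];
 *      union bpf_attr attr = {};
 *
 *      attr.query.target_fd   = cgroup_fd;
 *      attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
 *      attr.query.prog_ids    = (__u64)(unsigned long)ids;
 *      attr.query.prog_cnt    = 64;
 *      err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 */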

int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
                               enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
                                      struct sockaddr *uaddr,
                                      enum bpf_attach_type type,
                                      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                     struct bpf_sock_ops_kern *sock_ops,
                                     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                      short access, enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
                                   struct ctl_table *table, int write,
                                   char **buf, size_t *pcount, loff_t *ppos,
                                   enum bpf_attach_type type);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
                                       int *optname, char __user *optval,
                                       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                                       int optname, char __user *optval,
                                       int __user *optlen, int max_optlen,
                                       int retval);

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
                                            int optname, void *optval,
                                            int *optlen, int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
        struct bpf_map *map)
{
        if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
                return BPF_CGROUP_STORAGE_PERCPU;

        return BPF_CGROUP_STORAGE_SHARED;
}

static inline int bpf_cgroup_storage_set(struct bpf_cgroup_storage
                                         *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
        enum bpf_cgroup_storage_type stype;
        int i, err = 0;

        preempt_disable();
        for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
                if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != NULL))
                        continue;

                this_cpu_write(bpf_cgroup_storage_info[i].task, current);
                for_each_cgroup_storage_type(stype)
                        this_cpu_write(bpf_cgroup_storage_info[i].storage[stype],
                                       storage[stype]);
                goto out;
        }
        err = -EBUSY;
        WARN_ON_ONCE(1);

out:
        preempt_enable();
        return err;
}

static inline void bpf_cgroup_storage_unset(void)
{
        int i;

        for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
                if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
                        continue;

                this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
                return;
        }
}
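
/* Pairing sketch: a prog-array runner is expected to bracket each run
 * with set/unset so the storage helpers can resolve the buffers for
 * the current task (simplified; "item" and "ctx" are assumptions):
 *
 *      if (unlikely(bpf_cgroup_storage_set(item->cgroup_storage)))
 *              return -EBUSY;
 *      ret = BPF_PROG_RUN(prog, ctx);
 *      bpf_cgroup_storage_unset();
 */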

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
                      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
                                        enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
                             struct cgroup *cgroup,
                             enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                     void *value, u64 flags);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)                             \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled)                                               \
                __ret = __cgroup_bpf_run_filter_skb(sk, skb,                  \
                                                    BPF_CGROUP_INET_INGRESS); \
                                                                              \
        __ret;                                                                \
})
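
/* Call-site sketch: the ingress hook runs early in socket receive
 * (cf. sk_filter_trim_cap() in net/core/filter.c); a nonzero return
 * drops the skb:
 *
 *      err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *      if (err)
 *              return err;
 */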

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)                               \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled && sk && sk == skb->sk) {                       \
                typeof(sk) __sk = sk_to_full_sk(sk);                           \
                if (sk_fullsock(__sk))                                         \
                        __ret = __cgroup_bpf_run_filter_skb(__sk, skb,         \
                                                      BPF_CGROUP_INET_EGRESS); \
        }                                                                      \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, type)                                       \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled) {                                              \
                __ret = __cgroup_bpf_run_filter_sk(sk, type);                  \
        }                                                                      \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)                                      \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)                              \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)                                \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)                                \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)                                \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled)                                                \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
                                                          NULL);               \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)                    \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled) {                                              \
                lock_sock(sk);                                                 \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
                                                          t_ctx);              \
                release_sock(sk);                                              \
        }                                                                      \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)                              \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)                              \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
                                            sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)                           \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)                           \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)                      \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)                      \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)                \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)                \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)                       \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)                       \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
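
/* Call-site sketch (hypothetical caller): the _LOCK variants take and
 * release the socket lock themselves, so they are meant for paths that
 * enter with the socket unlocked:
 *
 *      err = BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
 *      if (err)
 *              return err;
 */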

/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be traced by
 * sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock and it is under syncookie mode.
 * Its listener-sk is not attached to the rsk_listener.
 * In this case, the caller holds the listener-sk (unlocked),
 * sets sock_ops->sk to the req_sk, and calls this SOCK_OPS"_SK" macro
 * with the listener-sk such that the cgroup-bpf progs of the
 * listener-sk will be run.
 *
 * Syncookie mode or not, calling bpf_setsockopt on the listener-sk
 * would not make sense anyway, so passing 'sock_ops->sk == req_sk' to
 * the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)                   \
({                                                                      \
        int __ret = 0;                                                  \
        if (cgroup_bpf_enabled)                                         \
                __ret = __cgroup_bpf_run_filter_sock_ops(sk,            \
                                                         sock_ops,      \
                                                         BPF_CGROUP_SOCK_OPS); \
        __ret;                                                          \
})

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)                                 \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled && (sock_ops)->sk) {                            \
                typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);               \
                if (__sk && sk_fullsock(__sk))                                 \
                        __ret = __cgroup_bpf_run_filter_sock_ops(__sk,         \
                                                                 sock_ops,     \
                                                         BPF_CGROUP_SOCK_OPS); \
        }                                                                      \
        __ret;                                                                 \
})
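
/* Call-site sketch (cf. tcp_call_bpf() in include/net/tcp.h): the
 * caller fills a stack bpf_sock_ops_kern and lets the macro resolve
 * the fullsock. Note that the typeof(sk) above relies on a variable
 * named "sk" being in scope at the call site:
 *
 *      struct bpf_sock_ops_kern sock_ops;
 *
 *      memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
 *      sock_ops.sk = sk;
 *      sock_ops.op = op;
 *      ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
 */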

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)         \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled)                                               \
                __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
                                                          access,             \
                                                          BPF_CGROUP_DEVICE); \
                                                                              \
        __ret;                                                                \
})
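
/* Call-site sketch (cf. devcgroup_check_permission() in
 * include/linux/device_cgroup.h): a nonzero return is mapped to
 * -EPERM by the caller:
 *
 *      if (BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access))
 *              return -EPERM;
 */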

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)        \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled)                                                \
                __ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
                                                       buf, count, pos,        \
                                                       BPF_CGROUP_SYSCTL);     \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,   \
                                       kernel_optval)                          \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled)                                                \
                __ret = __cgroup_bpf_run_filter_setsockopt(sock, level,        \
                                                           optname, optval,    \
                                                           optlen,             \
                                                           kernel_optval);     \
        __ret;                                                                 \
})

#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)                               \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled)                                                \
                get_user(__ret, optlen);                                       \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,   \
                                       max_optlen, retval)                     \
({                                                                             \
        int __ret = retval;                                                    \
        if (cgroup_bpf_enabled)                                                \
                if (!(sock)->sk_prot->bpf_bypass_getsockopt ||                 \
                    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
                                        tcp_bpf_bypass_getsockopt,             \
                                        level, optname))                       \
                        __ret = __cgroup_bpf_run_filter_getsockopt(            \
                                sock, level, optname, optval, optlen,          \
                                max_optlen, retval);                           \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval,      \
                                            optlen, retval)                    \
({                                                                             \
        int __ret = retval;                                                    \
        if (cgroup_bpf_enabled)                                                \
                __ret = __cgroup_bpf_run_filter_getsockopt_kern(               \
                        sock, level, optname, optval, optlen, retval);         \
        __ret;                                                                 \
})
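
/* Call-site sketch (cf. __sys_setsockopt() in net/socket.c): the hook
 * may rewrite level/optname/optval; when it does, kernel_optval points
 * at a kernel copy for the caller to use instead of the user buffer:
 *
 *      err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level, &optname,
 *                                           user_optval, &optlen,
 *                                           &kernel_optval);
 */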

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr);
#else

struct bpf_prog;
struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype)
{
        return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
                                        union bpf_attr __user *uattr)
{
        return -EINVAL;
}

static inline int bpf_cgroup_storage_set(
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { return 0; }
static inline void bpf_cgroup_storage_unset(void) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
                                            struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
        struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
        struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
                                                 void *value)
{
        return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
                                        void *key, void *value, u64 flags)
{
        return 0;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
                                       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
                                            optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
                                       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */