/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <net/sock.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
				       const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
					 const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
					  const struct bpf_insn *insn);

#ifdef CONFIG_CGROUP_BPF

#define CGROUP_ATYPE(type) \
	case BPF_##type: return type

static inline enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
	switch (attach_type) {
	CGROUP_ATYPE(CGROUP_INET_INGRESS);
	CGROUP_ATYPE(CGROUP_INET_EGRESS);
	CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
	CGROUP_ATYPE(CGROUP_SOCK_OPS);
	CGROUP_ATYPE(CGROUP_DEVICE);
	CGROUP_ATYPE(CGROUP_INET4_BIND);
	CGROUP_ATYPE(CGROUP_INET6_BIND);
	CGROUP_ATYPE(CGROUP_INET4_CONNECT);
	CGROUP_ATYPE(CGROUP_INET6_CONNECT);
	CGROUP_ATYPE(CGROUP_UNIX_CONNECT);
	CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
	CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
	CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
	CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
	CGROUP_ATYPE(CGROUP_UNIX_SENDMSG);
	CGROUP_ATYPE(CGROUP_SYSCTL);
	CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
	CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
	CGROUP_ATYPE(CGROUP_UNIX_RECVMSG);
	CGROUP_ATYPE(CGROUP_GETSOCKOPT);
	CGROUP_ATYPE(CGROUP_SETSOCKOPT);
	CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_UNIX_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_UNIX_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
	default:
		return CGROUP_BPF_ATTACH_TYPE_INVALID;
	}
}

#undef CGROUP_ATYPE
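
/*
 * Usage sketch (hypothetical caller, not part of this header): attach and
 * detach paths translate the UAPI attach type into the cgroup-local index
 * before touching per-cgroup arrays, bailing out on types with no slot:
 *
 *	enum cgroup_bpf_attach_type atype;
 *
 *	atype = to_cgroup_bpf_attach_type(attr->attach_type);
 *	if (atype == CGROUP_BPF_ATTACH_TYPE_INVALID)
 *		return -EINVAL;
 */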

extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list_map;
	struct list_head list_cg;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_cgroup_link {
	struct bpf_link link;
	struct cgroup *cgroup;
};

struct bpf_prog_list {
	struct hlist_node node;
	struct bpf_prog *prog;
	struct bpf_cgroup_link *link;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	u32 flags;
};

void __init cgroup_bpf_lifetime_notifier_init(void);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      int *uaddrlen,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   const struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, sockptr_t optval,
				       int *optlen, char **kernel_optval);

int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, sockptr_t optval,
				       sockptr_t optlen, int max_optlen,
				       int retval);

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}
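
/*
 * Sketch (hypothetical helper, not in this header): the union inside
 * struct bpf_cgroup_storage is discriminated by the owning map's type.
 * This assumes struct bpf_cgroup_storage_map embeds its struct bpf_map
 * as a member named 'map', which this header does not define:
 *
 *	static void *storage_data(struct bpf_cgroup_storage *storage)
 *	{
 *		struct bpf_map *map = &storage->map->map;
 *
 *		if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_PERCPU)
 *			return this_cpu_ptr(storage->percpu_buf);
 *		return storage->buf->data;
 *	}
 */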

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);

/* Opportunistic check to see whether we have any BPF program attached:
 * an effective array that still points at the shared empty program array
 * means nothing can run for this attach type.
 */
static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
					   enum cgroup_bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_prog_array *array;

	array = rcu_access_pointer(cgrp->bpf.effective[type]);
	return array != &bpf_empty_prog_array.hdr;
}
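
/*
 * Minimal usage sketch (hypothetical caller): the BPF_CGROUP_RUN_PROG_*
 * wrappers below pair the static-key fast path with this per-socket check,
 * so sockets in cgroups with no attached programs skip the run entirely:
 *
 *	if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) &&
 *	    cgroup_bpf_sock_enabled(sk, CGROUP_INET_EGRESS))
 *		err = __cgroup_bpf_run_filter_skb(sk, skb, CGROUP_INET_EGRESS);
 */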

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) &&			      \
	    cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS) && sk &&	      \
	    sk_fullsock(sk))						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (__sk && __sk == skb_to_full_sk(skb) &&		       \
		    cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS))	       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, atype)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		__ret = __cgroup_bpf_run_filter_sk(sk, atype);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)			       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype))					       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
							  atype, NULL, NULL);  \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx)	       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
							  atype, t_ctx, NULL); \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})

/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
 * via the upper bits of the return code. The only flag currently supported
 * (at bit position 0) indicates that the CAP_NET_BIND_SERVICE capability
 * check should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
 */
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, bind_flags) \
({									       \
	u32 __flags = 0;						       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
							  atype, NULL, &__flags); \
		release_sock(sk);					       \
		if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE)	       \
			*bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE;	       \
	}								       \
	__ret;								       \
})
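
/*
 * Caller sketch (an inet_bind()-style path; the variable names and the
 * port_is_privileged condition are assumptions, not part of this header):
 * run the hook, then honor the capability-bypass flag it may set:
 *
 *	u32 bind_flags = 0;
 *	int err;
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, &addr_len,
 *						 CGROUP_INET4_BIND, &bind_flags);
 *	if (err)
 *		return err;
 *	if (port_is_privileged &&
 *	    !(bind_flags & BIND_NO_CAP_NET_BIND_SERVICE) &&
 *	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
 *		return -EACCES;
 */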

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk)				       \
	((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) ||			       \
	  cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) &&			       \
	 (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen)			\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen)			\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx)	\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx)	\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx)	\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_RECVMSG, NULL)

/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be reached through
 * sk_to_full_sk(): e.g. sock_ops->sk is a request_sock in syncookie
 * mode, where the listener-sk is not attached via rsk_listener.
 * In this case, the caller holds the listener-sk (unlocked), sets
 * sock_ops->sk to the req_sk, and calls this SOCK_OPS"_SK" macro with
 * the listener-sk so that the cgroup-bpf-progs of the listener-sk
 * are run.
 *
 * Syncookie mode or not, calling bpf_setsockopt on a listener-sk
 * would not make sense anyway, so passing 'sock_ops->sk == req_sk'
 * to the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS))			\
		__ret = __cgroup_bpf_run_filter_sock_ops(sk,		\
							 sock_ops,	\
							 CGROUP_SOCK_OPS); \
	__ret;								\
})
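
/*
 * Caller sketch (hypothetical, modeled on the syncookie path; the op value
 * and surrounding names are assumptions): sock_ops.sk carries the
 * request_sock while the unlocked listener supplies the cgroup programs:
 *
 *	struct bpf_sock_ops_kern sock_ops = {};
 *
 *	sock_ops.op = BPF_SOCK_OPS_PARSE_HDR_OPT_CB;
 *	sock_ops.sk = req_to_sk(req);		// not a fullsock
 *	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, listener_sk);
 */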

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) {	       \
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled(CGROUP_DEVICE))				      \
		__ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
							  access,	      \
							  CGROUP_DEVICE); \
									      \
	__ret;								      \
})

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)  \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SYSCTL))				       \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
						       buf, count, pos,        \
						       CGROUP_SYSCTL);     \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,   \
				       kernel_optval)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) &&			       \
	    cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT))		       \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level,	       \
							   optname, optval,    \
							   optlen,	       \
							   kernel_optval);     \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,   \
				       max_optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) &&			       \
	    cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT))		       \
		if (!(sock)->sk_prot->bpf_bypass_getsockopt ||		       \
		    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
					tcp_bpf_bypass_getsockopt,	       \
					level, optname))		       \
			__ret = __cgroup_bpf_run_filter_getsockopt(	       \
				sock, level, optname, optval, optlen,	       \
				max_optlen, retval);			       \
	__ret;								       \
})
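
/*
 * Note: sk_prot->bpf_bypass_getsockopt lets a protocol exempt specific
 * (level, optname) pairs from the getsockopt hook; TCP, for instance,
 * uses tcp_bpf_bypass_getsockopt() to skip the hook for options whose
 * results a BPF program cannot usefully rewrite (e.g. TCP_ZEROCOPY_RECEIVE).
 */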

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval,      \
					    optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			       \
		__ret = __cgroup_bpf_run_filter_getsockopt_kern(	       \
			sock, level, optname, optval, optlen, retval);	       \
	__ret;								       \
})

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);

const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
#else

static inline void cgroup_bpf_lifetime_notifier_init(void)
{
	return;
}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return NULL;
}

static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value) {
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
					void *key, void *value, u64 flags) {
	return 0;
}

#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
					    optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */