/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <net/sock.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
				       const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
					 const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
					  const struct bpf_insn *insn);

#ifdef CONFIG_CGROUP_BPF

#define CGROUP_ATYPE(type) \
	case BPF_##type: return type

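/*
 * Map a generic bpf_attach_type to the dense cgroup_bpf_attach_type
 * index used for the per-cgroup effective program arrays; attach types
 * without a cgroup slot map to CGROUP_BPF_ATTACH_TYPE_INVALID.
 */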
static inline enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
	switch (attach_type) {
	CGROUP_ATYPE(CGROUP_INET_INGRESS);
	CGROUP_ATYPE(CGROUP_INET_EGRESS);
	CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
	CGROUP_ATYPE(CGROUP_SOCK_OPS);
	CGROUP_ATYPE(CGROUP_DEVICE);
	CGROUP_ATYPE(CGROUP_INET4_BIND);
	CGROUP_ATYPE(CGROUP_INET6_BIND);
	CGROUP_ATYPE(CGROUP_INET4_CONNECT);
	CGROUP_ATYPE(CGROUP_INET6_CONNECT);
	CGROUP_ATYPE(CGROUP_UNIX_CONNECT);
	CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
	CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
	CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
	CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
	CGROUP_ATYPE(CGROUP_UNIX_SENDMSG);
	CGROUP_ATYPE(CGROUP_SYSCTL);
	CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
	CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
	CGROUP_ATYPE(CGROUP_UNIX_RECVMSG);
	CGROUP_ATYPE(CGROUP_GETSOCKOPT);
	CGROUP_ATYPE(CGROUP_SETSOCKOPT);
	CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_UNIX_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_UNIX_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
	default:
		return CGROUP_BPF_ATTACH_TYPE_INVALID;
	}
}

#undef CGROUP_ATYPE

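/*
 * One static key per attach type: cgroup_bpf_enabled() lets the hot
 * paths below compile down to a no-op branch until a program of that
 * type is attached somewhere in the hierarchy.
 */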
extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};

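/*
 * One storage instance per (cgroup, map) pair. Exactly one member of
 * the union is live: @buf for shared storage, @percpu_buf for the
 * per-CPU flavour, selected by the owning map's type (see
 * cgroup_storage_type() below).
 */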
struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list_map;
	struct list_head list_cg;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_cgroup_link {
	struct bpf_link link;
	struct cgroup *cgroup;
	enum bpf_attach_type type;
};

struct bpf_prog_list {
	struct hlist_node node;
	struct bpf_prog *prog;
	struct bpf_cgroup_link *link;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	u32 flags;
};

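/*
 * Cgroup lifetime hooks: inherit() wires a new cgroup's effective
 * program arrays from its parent, offline() releases the cgroup's
 * bpf state on destruction (see kernel/bpf/cgroup.c for the
 * implementation).
 */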
int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      int *uaddrlen,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   const struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, sockptr_t optval,
				       int *optlen, char **kernel_optval);

int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, sockptr_t optval,
				       sockptr_t optlen, int max_optlen,
				       int retval);

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);

/* Opportunistic check to see whether we have any BPF program attached */
static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
					   enum cgroup_bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_prog_array *array;

	/* The pointer is only compared against the empty array, never
	 * dereferenced, so rcu_access_pointer() is sufficient here.
	 */
	array = rcu_access_pointer(cgrp->bpf.effective[type]);
	return array != &bpf_empty_prog_array.hdr;
}

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) &&			      \
	    cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS) && sk &&	      \
	    sk_fullsock(sk))						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})

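/* On egress the skb may still reference a request or timewait sock;
 * resolve it to the parent full sock first and run programs only when
 * the skb agrees on that full sock (see sk_to_full_sk() and
 * skb_to_full_sk()).
 */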
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (__sk && __sk == skb_to_full_sk(skb) &&		       \
		    cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS))	       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, atype)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		__ret = __cgroup_bpf_run_filter_sk(sk, atype);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)			       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype))					       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
							  atype, NULL, NULL);  \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx)	       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
							  atype, t_ctx, NULL); \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})

/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
 * via upper bits of return code. The only flag that is supported
 * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
 * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
 */
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, bind_flags) \
({									       \
	u32 __flags = 0;						       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
							  atype, NULL, &__flags); \
		release_sock(sk);					       \
		if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE)	       \
			*bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE;	       \
	}								       \
	__ret;								       \
})

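/* A pre-connect pass is needed when a connect hook may be attached and
 * the socket's protocol provides a pre_connect() handler to run it.
 */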
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk)				       \
	((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) ||			       \
	  cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) &&			       \
	 (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen)			\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen)			\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx)	\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx)	\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx)	\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_RECVMSG, NULL)

/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be traced by
 * sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock and it is under syncookie mode.
 * Its listener-sk is not attached to the rsk_listener.
 * In this case, the caller holds the listener-sk (unlocked), sets
 * sock_ops->sk to the req_sk, and calls this SOCK_OPS"_SK" variant
 * with the listener-sk so that the cgroup-bpf-progs of the
 * listener-sk will be run.
 *
 * Whether in syncookie mode or not, calling bpf_setsockopt on a
 * listener-sk would not make sense anyway, so passing
 * 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS))			\
		__ret = __cgroup_bpf_run_filter_sock_ops(sk,		\
							 sock_ops,	\
							 CGROUP_SOCK_OPS); \
	__ret;								\
})

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) {	       \
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})

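/* Device access checks: atype/major/minor/access describe the attempted
 * device operation handed to the CGROUP_DEVICE programs.
 */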
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled(CGROUP_DEVICE))				      \
		__ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
							  access,	      \
							  CGROUP_DEVICE); \
									      \
	__ret;								      \
})

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SYSCTL))				       \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
						       buf, count, pos,        \
						       CGROUP_SYSCTL);     \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,   \
				       kernel_optval)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) &&			       \
	    cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT))		       \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level,	       \
							   optname, optval,    \
							   optlen,	       \
							   kernel_optval);     \
	__ret;								       \
})

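/* getsockopt may be bypassed per protocol: if sk_prot provides
 * bpf_bypass_getsockopt() and it approves this (level, optname) pair,
 * the hook is skipped entirely (TCP uses this for e.g.
 * TCP_ZEROCOPY_RECEIVE).
 */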
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,   \
				       max_optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) &&			       \
	    cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT))		       \
		if (!(sock)->sk_prot->bpf_bypass_getsockopt ||		       \
		    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
					tcp_bpf_bypass_getsockopt,	       \
					level, optname))		       \
			__ret = __cgroup_bpf_run_filter_getsockopt(	       \
				sock, level, optname, optval, optlen,	       \
				max_optlen, retval);			       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval,      \
					    optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			       \
		__ret = __cgroup_bpf_run_filter_getsockopt_kern(	       \
			sock, level, optname, optval, optlen, retval);	       \
	__ret;								       \
})
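
/* Syscall-side entry points (implemented in kernel/bpf/cgroup.c) for
 * attaching, detaching, and querying cgroup programs and links.
 */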
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);

const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
const struct bpf_func_proto *
cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
#else

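/* Stubs for CONFIG_CGROUP_BPF=n: attach/detach/query fail with -EINVAL,
 * storage helpers degrade to no-ops, and the RUN_PROG wrappers below
 * compile away to constants.
 */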
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return NULL;
}

static inline const struct bpf_func_proto *
cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return NULL;
}

static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value) {
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
					void *key, void *value, u64 flags) {
	return 0;
}

#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
					    optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */