/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <net/sock.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
				       const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
					 const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
					  const struct bpf_insn *insn);

#ifdef CONFIG_CGROUP_BPF

#define CGROUP_ATYPE(type) \
	case BPF_##type: return type

static inline enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
	switch (attach_type) {
	CGROUP_ATYPE(CGROUP_INET_INGRESS);
	CGROUP_ATYPE(CGROUP_INET_EGRESS);
	CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
	CGROUP_ATYPE(CGROUP_SOCK_OPS);
	CGROUP_ATYPE(CGROUP_DEVICE);
	CGROUP_ATYPE(CGROUP_INET4_BIND);
	CGROUP_ATYPE(CGROUP_INET6_BIND);
	CGROUP_ATYPE(CGROUP_INET4_CONNECT);
	CGROUP_ATYPE(CGROUP_INET6_CONNECT);
	CGROUP_ATYPE(CGROUP_UNIX_CONNECT);
	CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
	CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
	CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
	CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
	CGROUP_ATYPE(CGROUP_UNIX_SENDMSG);
	CGROUP_ATYPE(CGROUP_SYSCTL);
	CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
	CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
	CGROUP_ATYPE(CGROUP_UNIX_RECVMSG);
	CGROUP_ATYPE(CGROUP_GETSOCKOPT);
	CGROUP_ATYPE(CGROUP_SETSOCKOPT);
	CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_UNIX_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_UNIX_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
	default:
		return CGROUP_BPF_ATTACH_TYPE_INVALID;
	}
}
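
/* Usage sketch (illustrative only; "attr", "cgrp" and "progs" are
 * assumptions, loosely modeled on an attach path): callers convert the
 * UAPI attach type to the dense per-cgroup index before indexing any
 * cgroup->bpf state:
 *
 *	enum cgroup_bpf_attach_type atype;
 *
 *	atype = to_cgroup_bpf_attach_type(attr->attach_type);
 *	if (atype == CGROUP_BPF_ATTACH_TYPE_INVALID)
 *		return -EINVAL;
 *	progs = &cgrp->bpf.progs[atype];
 */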

#undef CGROUP_ATYPE

extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list_map;
	struct list_head list_cg;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_cgroup_link {
	struct bpf_link link;
	struct cgroup *cgroup;
	enum bpf_attach_type type;
};

struct bpf_prog_list {
	struct hlist_node node;
	struct bpf_prog *prog;
	struct bpf_cgroup_link *link;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      int *uaddrlen,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, sockptr_t optval,
				       int *optlen, char **kernel_optval);

int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, sockptr_t optval,
				       sockptr_t optlen, int max_optlen,
				       int retval);

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}
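
/* Illustrative sketch (an assumption, condensed from a typical storage
 * lookup/update path): cgroup_storage_type() selects which union member
 * of struct bpf_cgroup_storage is valid:
 *
 *	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_PERCPU)
 *		ptr = per_cpu_ptr(storage->percpu_buf, cpu);
 *	else
 *		ptr = &storage->buf->data[0];
 */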

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);

/* Opportunistic check to see whether we have any BPF program attached. */
static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
					   enum cgroup_bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_prog_array *array;

	array = rcu_access_pointer(cgrp->bpf.effective[type]);
	return array != &bpf_empty_prog_array.hdr;
}

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) &&			      \
	    cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS))		      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    CGROUP_INET_INGRESS);     \
									      \
	__ret;								      \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk) && __sk == skb_to_full_sk(skb) &&	       \
		    cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS))	       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
							    CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, atype)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		__ret = __cgroup_bpf_run_filter_sk(sk, atype);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)			       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype))					       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
							  atype, NULL, NULL);  \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx)	       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
							  atype, t_ctx, NULL); \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})

/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
 * via the upper bits of the return code. The only flag currently
 * supported (at bit position 0) indicates that the CAP_NET_BIND_SERVICE
 * capability check should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
 */
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, bind_flags) \
({									       \
	u32 __flags = 0;						       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
							  atype, NULL, &__flags); \
		release_sock(sk);					       \
		if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE)	       \
			*bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE;	       \
	}								       \
	__ret;								       \
})
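
/* Illustrative caller sketch (an assumption, loosely modeled on the
 * inet bind path; "snum" and the surrounding capability checks are
 * placeholders): the bind_flags output lets a cgroup BPF program waive
 * the CAP_NET_BIND_SERVICE check for privileged ports:
 *
 *	u32 bind_flags = 0;
 *	int err;
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, &addr_len,
 *						 CGROUP_INET4_BIND,
 *						 &bind_flags);
 *	if (err)
 *		return err;
 *	if (snum && inet_port_requires_bind_service(net, snum) &&
 *	    !(bind_flags & BIND_NO_CAP_NET_BIND_SERVICE) &&
 *	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
 *		return -EACCES;
 */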

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk)				       \
	((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) ||			       \
	  cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) &&			       \
	 (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen)			\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen)			\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx)	\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx)	\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx)	\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_RECVMSG, NULL)

/* The SOCK_OPS_SK variant of the macro should be used when sock_ops->sk
 * is not a fullsock and its parent fullsock cannot be reached through
 * sk_to_full_sk().
 *
 * For example, sock_ops->sk is a request_sock under syncookie mode, so
 * its listener-sk is not attached via rsk_listener. In this case, the
 * caller holds the listener-sk (unlocked), sets sock_ops->sk to the
 * req_sk, and calls this SOCK_OPS_SK macro with the listener-sk so that
 * the cgroup-bpf progs of the listener-sk are run.
 *
 * Syncookie mode or not, calling bpf_setsockopt on a listener-sk would
 * not make sense anyway, so passing 'sock_ops->sk == req_sk' to the bpf
 * prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS))			\
		__ret = __cgroup_bpf_run_filter_sock_ops(sk,		\
							 sock_ops,	\
							 CGROUP_SOCK_OPS); \
	__ret;								\
})
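
/* Illustrative sketch of the syncookie case described above (an
 * assumption; the exact TCP call site is not part of this header):
 * sock_ops->sk points at the request_sock while the unlocked listener
 * is passed as @sk, so the listener's cgroup-bpf progs are run:
 *
 *	struct bpf_sock_ops_kern sock_ops = {};
 *	int ret;
 *
 *	sock_ops.sk = req_to_sk(req);	// a request_sock, not a fullsock
 *	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, listener_sk);
 */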

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) {	       \
		struct sock *__sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
								 CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled(CGROUP_DEVICE))				      \
		__ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
							  access,	      \
							  CGROUP_DEVICE);     \
									      \
	__ret;								      \
})

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SYSCTL))				       \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
						       buf, count, pos,        \
						       CGROUP_SYSCTL);	       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,   \
				       kernel_optval)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) &&			       \
	    cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT))		       \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level,	       \
							   optname, optval,    \
							   optlen,	       \
							   kernel_optval);     \
	__ret;								       \
})

#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			       \
		copy_from_sockptr(&__ret, optlen, sizeof(int));		       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,   \
				       max_optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) &&			       \
	    cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT))		       \
		if (!(sock)->sk_prot->bpf_bypass_getsockopt ||		       \
		    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
					  tcp_bpf_bypass_getsockopt,	       \
					  level, optname))		       \
			__ret = __cgroup_bpf_run_filter_getsockopt(	       \
				sock, level, optname, optval, optlen,	       \
				max_optlen, retval);			       \
	__ret;								       \
})
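
/* Illustrative flow (an assumption, condensed from a
 * __sys_getsockopt()-style caller): the optlen ceiling is sampled
 * before the protocol handler runs, and the BPF program then gets a
 * chance to inspect or rewrite the result:
 *
 *	int max_optlen, err;
 *
 *	max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
 *	if (max_optlen < 0)
 *		return max_optlen;
 *
 *	err = sk_getsockopt(sk, level, optname, optval, optlen);
 *	return BPF_CGROUP_RUN_PROG_GETSOCKOPT(sk, level, optname, optval,
 *					      optlen, max_optlen, err);
 */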

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval,      \
					    optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			       \
		__ret = __cgroup_bpf_run_filter_getsockopt_kern(	       \
			sock, level, optname, optval, optlen, retval);	       \
	__ret;								       \
})

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);

const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
const struct bpf_func_proto *
cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
#else

static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return NULL;
}

static inline const struct bpf_func_proto *
cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return NULL;
}

static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value)
{
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value, u64 flags)
{
	return 0;
}

#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
					    optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */