/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
/* size of tmp_str_buf in bpf_verifier.
 * we need at least 306 bytes to fit full stack mask representation
 * (in the "-8,-16,...,-512" form)
 */
#define TMP_STR_BUF_LEN 320
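/* For reference, the 306-byte figure above works out as follows: the
 * full mask has 64 entries, one per 8-byte slot of the 512-byte stack.
 * "-8" takes 2 characters, "-16".."-96" take 3 each (11 entries),
 * "-104".."-512" take 4 each (52 entries), plus 63 separating commas:
 *
 *	2 + 11 * 3 + 52 * 4 + 63 = 306
 *
 * 320 leaves headroom plus the terminating NUL.
 */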
/* Patch buffer size */
#define INSN_BUF_SIZE 32

#define ITER_PREFIX "bpf_iter_"

enum bpf_iter_state {
	BPF_ITER_STATE_INVALID, /* for non-first slot */
	BPF_ITER_STATE_ACTIVE,
	BPF_ITER_STATE_DRAINED,
};

struct bpf_reg_state {
	/* Ordering of fields matters.  See states_equal() */
	enum bpf_reg_type type;
	/*
	 * Fixed part of pointer offset, pointer types only.
	 * Or constant delta between "linked" scalars with the same ID.
	 */
	s32 off;
	union {
		/* valid when type == PTR_TO_PACKET */
		int range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct {
			struct bpf_map *map_ptr;
			/* To distinguish inner map pointers returned
			 * from an outer map lookup, map_uid is non-zero
			 * for registers pointing to inner maps.
			 */
			u32 map_uid;
		};

		/* for PTR_TO_BTF_ID */
		struct {
			struct btf *btf;
			u32 btf_id;
		};

		struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
			u32 mem_size;
			u32 dynptr_id; /* for dynptr slices */
		};

		/* For dynptr stack slots */
		struct {
			enum bpf_dynptr_type type;
			/* A dynptr is 16 bytes so it takes up 2 stack slots.
			 * We need to track which slot is the first slot
			 * to protect against cases where the user may try to
			 * pass in an address starting at the second slot of the
			 * dynptr.
			 */
			bool first_slot;
		} dynptr;

		/* For bpf_iter stack slots */
		struct {
			/* BTF container and BTF type ID describing
			 * struct bpf_iter_<type> of an iterator state
			 */
			struct btf *btf;
			u32 btf_id;
			/* packing following two fields to fit iter state into 16 bytes */
			enum bpf_iter_state state:2;
			int depth:30;
		} iter;

		/* For irq stack slots */
		struct {
			enum {
				IRQ_NATIVE_KFUNC,
				IRQ_LOCK_KFUNC,
			} kfunc_class;
		} irq;

		/* Max size from any of the above. */
		struct {
			unsigned long raw1;
			unsigned long raw2;
		} raw;

		u32 subprogno; /* for PTR_TO_FUNC */
	};
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
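	/* Illustration of how var_off and the ranges interact (example
	 * values, not tied to any particular program): after
	 *
	 *	r0 &= 0xff;
	 *	r0 |= 0x1;
	 *
	 * the verifier can track
	 *
	 *	var_off = (struct tnum){ .value = 0x1, .mask = 0xfe }
	 *
	 * i.e. bit 0 known-1, bits 1-7 unknown, upper bits known-0, which
	 * is consistent with umin_value == 1 and umax_value == 0xff; the
	 * signed and 32-bit bounds are kept in sync by the verifier core.
	 */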
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 * For stack slots that are dynptrs, this is used to track references to
	 * the dynptr to determine proper reference freeing.
	 * Similarly to dynptrs, we use ID to track "belonging" of a reference
	 * to a specific instance of bpf_iter.
	 */
	/*
	 * Upper bit of ID is used to remember relationship between "linked"
	 * registers. Example:
	 * r1 = r2;    both will have r1->id == r2->id == N
	 * r1 += 10;   r1->id == N | BPF_ADD_CONST and r1->off == 10
	 */
#define BPF_ADD_CONST (1U << 31)
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper, bpf_sk_fullsock() and
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both the "fullsock" ptr and
	 * the "tp" ptr should be invalidated as well.  In order to do that,
	 * the regs holding "fullsock" and "sk" need to remember
	 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
	 * such that the verifier can reset all regs which have
	 * ref_obj_id matching the sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id is then kept for NULL-marking purposes only.
	 * After NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type.  In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function stack
	 * while another to the caller's stack. To differentiate them 'frameno'
	 * is used which is an index in bpf_verifier_state->frame[] array
	 * pointing to bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching which only happens after main verification finished.
	 */
	s32 subreg_def;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,	  /* BPF program wrote some data into this slot */
	STACK_ZERO,	  /* BPF program wrote constant zero */
	/* A dynptr is stored in this stack slot. The type of dynptr
	 * is stored in bpf_stack_state->spilled_ptr.dynptr.type
	 */
	STACK_DYNPTR,
	STACK_ITER,
	STACK_IRQ_FLAG,
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
			  (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
			  (1 << BPF_REG_5))

#define BPF_DYNPTR_SIZE		sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS		(BPF_DYNPTR_SIZE / BPF_REG_SIZE)
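/* Given the 16-byte struct bpf_dynptr_kern noted in the dynptr comment
 * above, BPF_DYNPTR_NR_SLOTS == 16 / BPF_REG_SIZE == 2 stack slots.
 */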

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};

struct bpf_reference_state {
	/* Each reference object has a type. */
	enum ref_state_type {
		REF_TYPE_PTR		= (1 << 1),
		REF_TYPE_IRQ		= (1 << 2),
		REF_TYPE_LOCK		= (1 << 3),
		REF_TYPE_RES_LOCK	= (1 << 4),
		REF_TYPE_RES_LOCK_IRQ	= (1 << 5),
		REF_TYPE_LOCK_MASK	= REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
	} type;
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (eg, via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
	/* Used to keep track of the source object of a lock, to ensure
	 * it matches on unlock.
	 */
	void *ptr;
};

struct bpf_retval_range {
	s32 minval;
	s32 maxval;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from the point of view
	 * of the enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info
	 * zero == main subprog
	 */
	u32 subprogno;
	/* Every bpf_timer_start will increment async_entry_cnt.
	 * It's used to distinguish:
	 * void foo(void) { for(;;); }
	 * void foo(void) { bpf_timer_set_callback(,foo); }
	 */
	u32 async_entry_cnt;
	struct bpf_retval_range callback_ret_range;
	bool in_callback_fn;
	bool in_async_callback_fn;
	bool in_exception_callback_fn;
	/* For functions that invoke callbacks a limited number of times
	 * (e.g. bpf_loop), keeps track of the current simulated iteration
	 * number.
	 * Value in frame N refers to number of times callback with frame
	 * N+1 was simulated, e.g. for the following call:
	 *
	 *   bpf_loop(..., fn, ...); | suppose current frame is N
	 *                           | fn would be simulated in frame N+1
	 *                           | number of simulations is tracked in frame N
	 */
	u32 callback_depth;

	/* The following fields should be last. See copy_func_state() */
	/* The state of the stack. Each element of the array describes BPF_REG_SIZE
	 * (i.e. 8) bytes worth of stack memory.
	 * stack[0] represents bytes [*(r10-8)..*(r10-1)]
	 * stack[1] represents bytes [*(r10-16)..*(r10-9)]
	 * ...
	 * stack[allocated_stack/8 - 1] represents [*(r10-allocated_stack)..*(r10-allocated_stack+7)]
	 */
	struct bpf_stack_state *stack;
	/* Size of the current stack, in bytes. The stack state is tracked above, in
	 * `stack`. allocated_stack is always a multiple of BPF_REG_SIZE.
	 */
	int allocated_stack;
};

#define MAX_CALL_FRAMES 8

/* instruction history flags, used in bpf_jmp_history_entry.flags field */
enum {
	/* instruction references stack slot through PTR_TO_STACK register;
	 * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
	 * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512,
	 * 8 bytes per slot, so slot index (spi) is [0, 63])
	 */
	INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */

	INSN_F_SPI_MASK = 0x3f, /* 6 bits */
	INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */

	INSN_F_STACK_ACCESS = BIT(9),

	INSN_F_DST_REG_STACK = BIT(10), /* dst_reg is PTR_TO_STACK */
	INSN_F_SRC_REG_STACK = BIT(11), /* src_reg is PTR_TO_STACK */
	/* total 12 bits are used now. */
};

static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
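/* Sketch of how a stack access is packed into the 12-bit flags field
 * (insn_stack_flags() is a hypothetical helper, not part of this API;
 * frameno and spi are assumed to already fit their masks):
 *
 *	static inline u32 insn_stack_flags(u32 frameno, u32 spi)
 *	{
 *		return INSN_F_STACK_ACCESS |
 *		       (spi << INSN_F_SPI_SHIFT) |
 *		       (frameno & INSN_F_FRAMENO_MASK);
 *	}
 *
 * with the matching decode:
 *
 *	frameno = flags & INSN_F_FRAMENO_MASK;
 *	spi = (flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;
 */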

struct bpf_jmp_history_entry {
	u32 idx;
	/* insn idx can't be bigger than 1 million */
	u32 prev_idx : 20;
	/* special INSN_F_xxx flags */
	u32 flags : 12;
	/* additional registers that need precision tracking when this
	 * jump is backtracked, vector of six 10-bit records
	 */
	u64 linked_regs;
};
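/* One way to picture linked_regs, assuming only the six 10-bit records
 * mentioned above are packed back to back from bit 0 (the authoritative
 * pack/unpack layout lives in verifier.c):
 *
 *	u32 rec = (linked_regs >> (i * 10)) & 0x3ff;	// i in [0, 5]
 */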

/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/* Acquired reference states */
	struct bpf_reference_state *refs;
	/*
	 * 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 * were safely pruned
	 * 1 - at least one path is being explored.
	 * This state hasn't reached bpf_exit
	 * 2 - at least two paths are being explored.
	 * This state is an immediate parent of two children.
	 * One is fallthrough branch with branches==1 and another
	 * state is pushed into stack (to be explored later) also with
	 * branches==1. The parent of this state has branches==1.
	 * The verifier state tree connected via 'parent' pointer looks like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 2 -> 1 (second 'if' pushed into stack)
	 * 1
	 * 1
	 * 1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 1 -> 1 (second 'if' pushed into stack)
	 * 0
	 * 0
	 * 0 bpf_exit.
	 * After pop_stack() the do_check() will resume at second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop. If such state is exactly equal to the current state
	 * it's an infinite loop. Note states_equal() checks for states
	 * equivalency, so two states being 'states_equal' does not mean
	 * infinite loop. The exact comparison is provided by
	 * states_maybe_looping() function. It's a stronger pre-check and
	 * much faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops or
	 * loop iteration count may be too high.
	 * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;

	u32 acquired_refs;
	u32 active_locks;
	u32 active_preempt_locks;
	u32 active_irq_id;
	u32 active_lock_id;
	void *active_lock_ptr;
	bool active_rcu_lock;

	bool speculative;
	bool in_sleepable;
	bool cleaned;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* If this state is a backedge state then equal_state
	 * records the cached state to which this state is equal.
	 */
	struct bpf_verifier_state *equal_state;
	/* jmp history recorded from first to last.
	 * Backtracking uses it to go from last to first.
	 * For most states jmp_history_cnt is [0-3].
	 * For loops it can go up to ~40.
	 */
	struct bpf_jmp_history_entry *jmp_history;
	u32 jmp_history_cnt;
	u32 dfs_depth;
	u32 callback_unroll_depth;
	u32 may_goto_depth;
};

#define bpf_get_spilled_reg(slot, frame, mask)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  ((1 << frame->stack[slot].slot_type[BPF_REG_SIZE - 1]) & (mask))) \
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg, mask)			\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask);		\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame, mask))
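/* Example use: given a struct bpf_func_state *frame, visit every spilled
 * register, skipping slots for which the iterator yields NULL:
 *
 *	struct bpf_reg_state *reg;
 *	int i;
 *
 *	bpf_for_each_spilled_reg(i, frame, reg, 1 << STACK_SPILL) {
 *		if (!reg)
 *			continue;
 *		// inspect or update *reg here
 *	}
 */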

#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr)   \
	({                                                               \
		struct bpf_verifier_state *___vstate = __vst;            \
		int ___i, ___j;                                          \
		for (___i = 0; ___i <= ___vstate->curframe; ___i++) {    \
			struct bpf_reg_state *___regs;                   \
			__state = ___vstate->frame[___i];                \
			___regs = __state->regs;                         \
			for (___j = 0; ___j < MAX_BPF_REG; ___j++) {     \
				__reg = &___regs[___j];                  \
				(void)(__expr);                          \
			}                                                \
			bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \
				if (!__reg)                              \
					continue;                        \
				(void)(__expr);                          \
			}                                                \
		}                                                        \
	})

/* Invoke __expr over registers in __vst, setting __state and __reg */
#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
	bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr)
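/* Example use, modelled on callers in verifier.c: run a statement over
 * every register (including spilled ones) in all frames of a state:
 *
 *	struct bpf_func_state *state;
 *	struct bpf_reg_state *reg;
 *
 *	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
 *		reg->precise = false;
 *	}));
 */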

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct list_head node;
	u32 miss_cnt;
	u32 hit_cnt:31;
	u32 in_free_list:1;
};

struct bpf_loop_inline_state {
	unsigned int initialized:1; /* set to true upon first entry */
	unsigned int fit_for_inline:1; /* true if callback function is the same
					* at each call and flags are always zero
					*/
	u32 callback_subprogno; /* valid when fit_for_inline is true */
};

/* pointer and state for maps */
struct bpf_map_ptr_state {
	struct bpf_map *map_ptr;
	bool poison;
	bool unpriv;
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC		(1U << 0)
#define BPF_ALU_SANITIZE_DST		(1U << 1)
#define BPF_ALU_NEG_VALUE		(1U << 2)
#define BPF_ALU_NON_POINTER		(1U << 3)
#define BPF_ALU_IMMEDIATE		(1U << 4)
#define BPF_ALU_SANITIZE		(BPF_ALU_SANITIZE_SRC | \
					 BPF_ALU_SANITIZE_DST)

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		struct bpf_map_ptr_state map_ptr_state;
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
		struct {
			enum bpf_reg_type reg_type;	/* type of pseudo_btf_id */
			union {
				struct {
					struct btf *btf;
					u32 btf_id;	/* btf_id for struct typed var */
				};
				u32 mem_size;	/* mem_size for non-struct typed var */
			};
		} btf_var;
		/* if instruction is a call to bpf_loop, this field tracks
		 * the state of the relevant registers to decide whether to inline it
		 */
		struct bpf_loop_inline_state loop_inline_state;
	};
	union {
		/* remember the size of type passed to bpf_obj_new to rewrite R1 */
		u64 obj_new_size;
		/* remember the offset of node field within type to rewrite */
		u64 insert_off;
	};
	struct btf_struct_meta *kptr_struct_meta;
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool nospec; /* do not execute this instruction speculatively */
	bool nospec_result; /* result is unsafe under speculation, nospec must follow */
	bool zext_dst; /* this insn zero extends dst reg */
	bool needs_zext; /* alu op needs to clear upper bits */
	bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
	bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
	bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
	u8 alu_state; /* used in combination with alu_limit */
	/* true if STX or LDX instruction is a part of a spill/fill
	 * pattern for a bpf_fastcall call.
	 */
	u8 fastcall_pattern:1;
	/* for CALL instructions, a number of spill/fill pairs in the
	 * bpf_fastcall pattern.
	 */
	u8 fastcall_spills_num:3;
	u8 arg_prog:4;

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	bool jmp_point;
	bool prune_point;
	/* ensure we check state equivalence and save state checkpoint at
	 * this instruction, regardless of any heuristics
	 */
	bool force_checkpoint;
	/* true if instruction is a call to a helper function that
	 * accepts callback function as a parameter.
	 */
	bool calls_callback;
	/*
	 * CFG strongly connected component this instruction belongs to,
	 * zero if it is a singleton SCC.
	 */
	u32 scc;
	/* registers alive before this instruction. */
	u16 live_regs_before;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	/* Logical start and end positions of a "log window" of the verifier log.
	 * start_pos == 0 means we haven't truncated anything.
	 * Once truncation starts to happen, start_pos + len_total == end_pos,
	 * except during log reset situations, in which (end_pos - start_pos)
	 * might get smaller than len_total (see bpf_vlog_reset()).
	 * Generally, (end_pos - start_pos) gives the number of useful bytes
	 * in the user log buffer.
	 */
	u64 start_pos;
	u64 end_pos;
	char __user *ubuf;
	u32 level;
	u32 len_total;
	u32 len_max;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
};

#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_FIXED	8
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */
#define BPF_LOG_MIN_ALIGNMENT 8U
#define BPF_LOG_ALIGNMENT 40U

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log && log->level;
}

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_arg_info {
	enum bpf_arg_type arg_type;
	union {
		u32 mem_size;
		u32 btf_id;
	};
};

enum priv_stack_mode {
	PRIV_STACK_UNKNOWN,
	NO_PRIV_STACK,
	PRIV_STACK_ADAPTIVE,
};

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u32 postorder_start; /* The idx to the env->cfg.insn_postorder */
	u16 stack_depth; /* max. stack depth used by this function */
	u16 stack_extra;
	/* offsets in range [stack_depth .. fastcall_stack_off)
	 * are used for bpf_fastcall spills and fills.
	 */
	s16 fastcall_stack_off;
	bool has_tail_call: 1;
	bool tail_call_reachable: 1;
	bool has_ld_abs: 1;
	bool is_cb: 1;
	bool is_async_cb: 1;
	bool is_exception_cb: 1;
	bool args_cached: 1;
	/* true if bpf_fastcall stack region is used by functions that can't be inlined */
	bool keep_fastcall_stack: 1;
	bool changes_pkt_data: 1;
	bool might_sleep: 1;

	enum priv_stack_mode priv_stack_mode;
	u8 arg_cnt;
	struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
};

struct bpf_verifier_env;

struct backtrack_state {
	struct bpf_verifier_env *env;
	u32 frame;
	u32 reg_masks[MAX_CALL_FRAMES];
	u64 stack_masks[MAX_CALL_FRAMES];
};

struct bpf_id_pair {
	u32 old;
	u32 cur;
};

struct bpf_idmap {
	u32 tmp_id_gen;
	struct bpf_id_pair map[BPF_ID_MAP_SIZE];
};

struct bpf_idset {
	u32 count;
	u32 ids[BPF_ID_MAP_SIZE];
};

/* see verifier.c:compute_scc_callchain() */
struct bpf_scc_callchain {
	/* call sites from bpf_verifier_state->frame[*]->callsite leading to this SCC */
	u32 callsites[MAX_CALL_FRAMES - 1];
	/* last frame in a chain is identified by SCC id */
	u32 scc;
};

/* verifier state waiting for propagate_backedges() */
struct bpf_scc_backedge {
	struct bpf_scc_backedge *next;
	struct bpf_verifier_state state;
};

struct bpf_scc_visit {
	struct bpf_scc_callchain callchain;
	/* first state in current verification path that entered SCC
	 * identified by the callchain
	 */
	struct bpf_verifier_state *entry_state;
	struct bpf_scc_backedge *backedges; /* list of backedges */
	u32 num_backedges;
};

/* An array of bpf_scc_visit structs sharing the same bpf_scc_callchain->scc
 * but having different bpf_scc_callchain->callsites.
 */
struct bpf_scc_info {
	u32 num_visits;
	struct bpf_scc_visit visits[];
};

struct bpf_liveness;

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct module *attach_btf_mod;	/* The owner module of prog->aux->attach_btf */
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	bool test_reg_invariants;	/* fail verification on register invariants violations */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	/* Search pruning optimization, array of list_heads for
	 * lists of struct bpf_verifier_state_list.
	 */
	struct list_head *explored_states;
	struct list_head free_list;	/* list of struct bpf_verifier_state_list */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTFs used by BPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 used_btf_cnt;		/* number of used BTF objects */
	u32 id_gen;			/* used to generate unique reg IDs */
	u32 hidden_subprog_cnt;		/* number of hidden subprogs */
	int exception_callback_subprog;
	bool explore_alu_limits;
	bool allow_ptr_leaks;
	/* Allow access to uninitialized stack memory. Writes with fixed offset are
	 * always allowed, so this refers to reads (with fixed or variable offset),
	 * to writes with variable offset and to indirect (helper) accesses.
	 */
	bool allow_uninit_stack;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	bool seen_exception;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
	union {
		struct bpf_idmap idmap_scratch;
		struct bpf_idset idset_scratch;
	};
	struct {
		int *insn_state;
		int *insn_stack;
		/*
		 * vector of instruction indexes sorted in post-order, grouped by subprogram,
		 * see bpf_subprog_info->postorder_start.
		 */
		int *insn_postorder;
		int cur_stack;
		/* current position in the insn_postorder vector */
		int cur_postorder;
	} cfg;
	struct backtrack_state bt;
	struct bpf_jmp_history_entry *cur_hist_ent;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* Some states are freed during program analysis.
	 * This is the peak number of states. This number dominates kernel
	 * memory consumption during verification.
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
	u32 free_list_size;
	u32 explored_states_size;
	u32 num_backedges;
	bpfptr_t fd_array;

	/* bit mask to keep track of whether a register has been accessed
	 * since the last time the function state was printed
	 */
	u32 scratched_regs;
	/* Same as scratched_regs but for stack slots */
	u64 scratched_stack_slots;
	u64 prev_log_pos, prev_insn_print_pos;
	/* buffer used to temporarily hold constants as scalar registers */
	struct bpf_reg_state fake_reg[2];
	/* buffer used to generate temporary string representations,
	 * e.g., in reg_type_str() to generate reg_type string
	 */
	char tmp_str_buf[TMP_STR_BUF_LEN];
	struct bpf_insn insn_buf[INSN_BUF_SIZE];
	struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
	struct bpf_scc_callchain callchain_buf;
	struct bpf_liveness *liveness;
	/* array of pointers to bpf_scc_info indexed by SCC id */
	struct bpf_scc_info **scc_info;
	u32 scc_cnt;
};

static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
{
	return &env->prog->aux->func_info_aux[subprog];
}

static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env, int subprog)
{
	return &env->subprog_info[subprog];
}

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);
int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
		  char __user *log_buf, u32 log_size);
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);

__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
				  u32 insn_off,
				  const char *prefix_fmt, ...);

#define verifier_bug_if(cond, env, fmt, args...)						\
	({											\
		bool __cond = (cond);								\
		if (unlikely(__cond))								\
			verifier_bug(env, fmt " (" #cond ")", ##args);				\
		(__cond);									\
	})
#define verifier_bug(env, fmt, args...)								\
	({											\
		BPF_WARN_ONCE(1, "verifier bug: " fmt "\n", ##args);				\
		bpf_log(&env->log, "verifier bug: " fmt "\n", ##args);				\
	})
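/* Example use (a hypothetical check, not taken from verifier.c):
 *
 *	if (verifier_bug_if(regno >= MAX_BPF_REG, env,
 *			    "invalid regno %d", regno))
 *		return -EFAULT;
 *
 * verifier_bug_if() evaluates to the condition itself, so callers can
 * test it and bail out; verifier_bug() logs unconditionally.
 */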

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     struct btf *btf, u32 btf_id)
{
	if (tgt_prog)
		return ((u64)tgt_prog->aux->id << 32) | btf_id;
	else
		return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}

/* unpack the IDs from the key as constructed above */
static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
{
	if (obj_id)
		*obj_id = key >> 32;
	if (btf_id)
		*btf_id = key & 0x7FFFFFFF;
}
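/* Key layout, as constructed above: bits 63-32 carry the target prog id
 * or the BTF object id, bit 31 marks the BTF-object case, bits 30-0
 * carry btf_id.  Round-trip example:
 *
 *	u32 obj_id, btf_id;
 *	u64 key = bpf_trampoline_compute_key(NULL, btf, 42);
 *
 *	bpf_trampoline_unpack_key(key, &obj_id, &btf_id);
 *	// obj_id == btf_obj_id(btf), btf_id == 42
 */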

int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);

int mark_chain_precision(struct bpf_verifier_env *env, int regno);

#define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

/* extract base type from bpf_{arg, return, reg}_type. */
static inline u32 base_type(u32 type)
{
	return type & BPF_BASE_TYPE_MASK;
}

/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
static inline u32 type_flag(u32 type)
{
	return type & ~BPF_BASE_TYPE_MASK;
}
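/* For example, given a register typed PTR_TO_MAP_VALUE | PTR_MAYBE_NULL:
 *
 *	base_type(type) == PTR_TO_MAP_VALUE
 *	type_flag(type) == PTR_MAYBE_NULL
 */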

/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
{
	return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ?
		prog->aux->saved_dst_prog_type : prog->type;
}

static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
{
	switch (resolve_prog_type(prog)) {
	case BPF_PROG_TYPE_TRACING:
		return prog->expected_attach_type != BPF_TRACE_ITER;
	case BPF_PROG_TYPE_STRUCT_OPS:
		return prog->aux->jits_use_priv_stack;
	case BPF_PROG_TYPE_LSM:
	case BPF_PROG_TYPE_SYSCALL:
		return false;
	default:
		return true;
	}
}

#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)

static inline bool bpf_type_has_unsafe_modifiers(u32 type)
{
	return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
}

static inline bool type_is_ptr_alloc_obj(u32 type)
{
	return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
}

static inline bool type_is_non_owning_ref(u32 type)
{
	return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
}

static inline bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	type = base_type(type);
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static inline bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_SOCK_COMMON ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_XDP_SOCK;
}

static inline bool type_may_be_null(u32 type)
{
	return type & PTR_MAYBE_NULL;
}

static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
{
	env->scratched_regs |= 1U << regno;
}

static inline void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
{
	env->scratched_stack_slots |= 1ULL << spi;
}

static inline bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
{
	return (env->scratched_regs >> regno) & 1;
}

static inline bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
{
	return (env->scratched_stack_slots >> regno) & 1;
}

static inline bool verifier_state_scratched(const struct bpf_verifier_env *env)
{
	return env->scratched_regs || env->scratched_stack_slots;
}

static inline void mark_verifier_state_clean(struct bpf_verifier_env *env)
{
	env->scratched_regs = 0U;
	env->scratched_stack_slots = 0ULL;
}

/* Used for printing the entire verifier state. */
static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env)
{
	env->scratched_regs = ~0U;
	env->scratched_stack_slots = ~0ULL;
}

static inline bool bpf_stack_narrow_access_ok(int off, int fill_size, int spill_size)
{
#ifdef __BIG_ENDIAN
	off -= spill_size - fill_size;
#endif

	return !(off % BPF_REG_SIZE);
}
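/* Example: narrowing an 8-byte spill at fp-8 to a 4-byte fill.  On
 * little endian the least significant half sits at the slot base, so
 * bpf_stack_narrow_access_ok(-8, 4, 8) is true while off == -4 (the
 * upper half) is rejected.  On big endian the least significant half
 * sits at the high end of the slot, so off is shifted by
 * spill_size - fill_size first and off == -4 is the accepted fill.
 */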

const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
const char *dynptr_type_str(enum bpf_dynptr_type type);
const char *iter_type_str(const struct btf *btf, u32 btf_id);
const char *iter_state_str(enum bpf_iter_state state);

void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
			  u32 frameno, bool print_all);
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
		      u32 frameno);

struct bpf_subprog_info *bpf_find_containing_subprog(struct bpf_verifier_env *env, int off);
int bpf_jmp_offset(struct bpf_insn *insn);
int bpf_insn_successors(struct bpf_prog *prog, u32 idx, u32 succ[2]);
void bpf_fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask);
bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx);

int bpf_stack_liveness_init(struct bpf_verifier_env *env);
void bpf_stack_liveness_free(struct bpf_verifier_env *env);
int bpf_update_live_stack(struct bpf_verifier_env *env);
int bpf_mark_stack_read(struct bpf_verifier_env *env, u32 frameno, u32 insn_idx, u64 mask);
void bpf_mark_stack_write(struct bpf_verifier_env *env, u32 frameno, u64 mask);
int bpf_reset_stack_write_marks(struct bpf_verifier_env *env, u32 insn_idx);
int bpf_commit_stack_write_marks(struct bpf_verifier_env *env);
int bpf_live_stack_query_init(struct bpf_verifier_env *env, struct bpf_verifier_state *st);
bool bpf_stack_slot_alive(struct bpf_verifier_env *env, u32 frameno, u32 spi);
void bpf_reset_live_stack_callchain(struct bpf_verifier_env *env);

#endif /* _LINUX_BPF_VERIFIER_H */