Lines Matching +full:assert +full:- +full:falling +full:- +full:edge

1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7 #include <linux/bpf-cgroup.h>
23 #include <linux/error-injection.h>
52 * The first pass is depth-first-search to check that the program is a DAG.
54 * - larger than BPF_MAXINSNS insns
55 * - if loop is present (detected via back-edge)
56 * - unreachable insns exist (shouldn't be a forest. program = one function)
57 * - out of bounds or malformed jumps
69 * All registers are 64-bit.
70 * R0 - return register
71 * R1-R5 argument passing registers
72 * R6-R9 callee saved registers
73 * R10 - frame pointer read-only
80 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
85 * (and -20 constant is saved for further stack bounds checking).
125 * [key, key + map->key_size) bytes are valid and were initialized on
131 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
135 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
136 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
138 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
143 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
151 * After the call R0 is set to return type of the function and registers R1-R5
157 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
163 * passes through a NULL-check conditional. For the branch wherein the state is
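Pulling those fragments together, the kind of instruction sequence the walkthrough above describes might look roughly like this in a userspace loader (a sketch only: it assumes the BPF_* insn macros from tools/include/linux/filter.h, a map_fd obtained elsewhere, and a map with a 4-byte key and an at-least-8-byte value):

	struct bpf_insn prog[] = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),	/* R2 = R10, i.e. FRAME_PTR */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),	/* R2 type is now PTR_TO_STACK */
		BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),	/* key bytes must be initialized */
		BPF_LD_MAP_FD(BPF_REG_1, map_fd),	/* R1 type is now CONST_PTR_TO_MAP */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),	/* R0 is PTR_TO_MAP_VALUE_OR_NULL */
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),	/* legal only on the non-NULL branch */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};

Each comment mirrors the register type the verifier tracks at that point; dropping the NULL check would make the store through R0 rejected.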
212 return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON; in bpf_map_ptr_poisoned()
217 return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV; in bpf_map_ptr_unpriv()
225 aux->map_ptr_state = (unsigned long)map | in bpf_map_ptr_store()
231 return aux->map_key_state & BPF_MAP_KEY_POISON; in bpf_map_key_poisoned()
236 return !(aux->map_key_state & BPF_MAP_KEY_SEEN); in bpf_map_key_unseen()
241 return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON); in bpf_map_key_immediate()
248 aux->map_key_state = state | BPF_MAP_KEY_SEEN | in bpf_map_key_store()
254 return insn->code == (BPF_JMP | BPF_CALL) && in bpf_helper_call()
255 insn->src_reg == 0; in bpf_helper_call()
260 return insn->code == (BPF_JMP | BPF_CALL) && in bpf_pseudo_call()
261 insn->src_reg == BPF_PSEUDO_CALL; in bpf_pseudo_call()
266 return insn->code == (BPF_JMP | BPF_CALL) && in bpf_pseudo_kfunc_call()
267 insn->src_reg == BPF_PSEUDO_KFUNC_CALL; in bpf_pseudo_kfunc_call()
310 /* arg_{btf,btf_id,owning_ref} are used by kfunc-specific handling,
311 * generally to pass info about user-defined local kptr types to later
346 return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off); in btf_type_name()
357 if (!bpf_verifier_log_needed(&env->log)) in verbose()
361 bpf_verifier_vlog(&env->log, fmt, args); in verbose()
373 if (reg->smin_value > S64_MIN) { in verbose_invalid_scalar()
374 verbose(env, " smin=%lld", reg->smin_value); in verbose_invalid_scalar()
377 if (reg->smax_value < S64_MAX) { in verbose_invalid_scalar()
378 verbose(env, " smax=%lld", reg->smax_value); in verbose_invalid_scalar()
395 type = reg->type; in reg_not_null()
414 if (reg->type == PTR_TO_MAP_VALUE) { in reg_btf_record()
415 rec = reg->map_ptr->record; in reg_btf_record()
416 } else if (type_is_ptr_alloc_obj(reg->type)) { in reg_btf_record()
417 meta = btf_find_struct_meta(reg->btf, reg->btf_id); in reg_btf_record()
419 rec = meta->record; in reg_btf_record()
426 struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux; in subprog_is_global()
435 if (!env->prog->aux->func_info) in subprog_name()
438 info = &env->prog->aux->func_info[subprog]; in subprog_name()
439 return btf_type_name(env->prog->aux->btf, info->type_id); in subprog_name()
446 info->is_cb = true; in mark_subprog_exc_cb()
447 info->is_async_cb = true; in mark_subprog_exc_cb()
448 info->is_exception_cb = true; in mark_subprog_exc_cb()
453 return subprog_info(env, subprog)->is_exception_cb; in subprog_is_exc_cb()
469 enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC; in is_acquire_function()
527 return (bpf_helper_call(insn) && is_sync_callback_calling_function(insn->imm)) || in is_sync_callback_calling_insn()
528 (bpf_pseudo_kfunc_call(insn) && is_sync_callback_calling_kfunc(insn->imm)); in is_sync_callback_calling_insn()
556 return BPF_CLASS(insn->code) == BPF_STX && in is_cmpxchg_insn()
557 BPF_MODE(insn->code) == BPF_ATOMIC && in is_cmpxchg_insn()
558 insn->imm == BPF_CMPXCHG; in is_cmpxchg_insn()
563 return (-off - 1) / BPF_REG_SIZE; in __get_spi()
569 struct bpf_verifier_state *cur = env->cur_state; in func()
571 return cur->frame[reg->frameno]; in func()
576 int allocated_slots = state->allocated_stack / BPF_REG_SIZE; in is_spi_bounds_valid()
578 /* We need to check that slots between [spi - nr_slots + 1, spi] are in is_spi_bounds_valid()
583 * spi and the second slot will be at spi - 1. in is_spi_bounds_valid()
585 return spi - nr_slots + 1 >= 0 && spi < allocated_slots; in is_spi_bounds_valid()
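The slot arithmetic above is easy to check in isolation; a small standalone sketch (assuming BPF_REG_SIZE is 8 as in the kernel, with local stand-in names):

	#include <stdbool.h>
	#include <stdio.h>

	#define BPF_REG_SIZE 8	/* one stack slot is 8 bytes */

	/* same arithmetic as __get_spi(): fp-8 is slot 0, fp-16 is slot 1, ... */
	static int get_spi(int off)
	{
		return (-off - 1) / BPF_REG_SIZE;
	}

	/* same shape as is_spi_bounds_valid(): an object spanning nr_slots slots
	 * ends at spi and starts at spi - nr_slots + 1; both ends must fall
	 * inside the currently allocated stack.
	 */
	static bool spi_bounds_valid(int spi, int nr_slots, int allocated_slots)
	{
		return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
	}

	int main(void)
	{
		printf("%d %d %d\n", get_spi(-8), get_spi(-16), get_spi(-17));	/* 0 1 2 */
		printf("%d\n", spi_bounds_valid(get_spi(-16), 2, 4));		/* 1 */
		return 0;
	}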
593 if (!tnum_is_const(reg->var_off)) { in stack_slot_obj_get_spi()
595 return -EINVAL; in stack_slot_obj_get_spi()
598 off = reg->off + reg->var_off.value; in stack_slot_obj_get_spi()
601 return -EINVAL; in stack_slot_obj_get_spi()
607 return -EINVAL; in stack_slot_obj_get_spi()
611 return -ERANGE; in stack_slot_obj_get_spi()
674 int id = ++env->id_gen; in mark_dynptr_stack_regs()
684 __mark_dynptr_reg(reg, type, true, ++env->id_gen); in mark_dynptr_cb_reg()
701 /* We cannot assume both spi and spi - 1 belong to the same dynptr, in mark_stack_slots_dynptr()
713 err = destroy_if_dynptr_stack_slot(env, state, spi - 1); in mark_stack_slots_dynptr()
718 state->stack[spi].slot_type[i] = STACK_DYNPTR; in mark_stack_slots_dynptr()
719 state->stack[spi - 1].slot_type[i] = STACK_DYNPTR; in mark_stack_slots_dynptr()
724 return -EINVAL; in mark_stack_slots_dynptr()
726 mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr, in mark_stack_slots_dynptr()
727 &state->stack[spi - 1].spilled_ptr, type); in mark_stack_slots_dynptr()
741 state->stack[spi].spilled_ptr.ref_obj_id = id; in mark_stack_slots_dynptr()
742 state->stack[spi - 1].spilled_ptr.ref_obj_id = id; in mark_stack_slots_dynptr()
745 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in mark_stack_slots_dynptr()
746 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; in mark_stack_slots_dynptr()
756 state->stack[spi].slot_type[i] = STACK_INVALID; in invalidate_dynptr()
757 state->stack[spi - 1].slot_type[i] = STACK_INVALID; in invalidate_dynptr()
760 __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); in invalidate_dynptr()
761 __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); in invalidate_dynptr()
769 * check_stack_read_fixed_off will do mark_reg_read for all 8 bytes of in invalidate_dynptr()
774 * default (where the default reg state has its reg->parent as NULL), or in invalidate_dynptr()
776 * mark_reg_read won't walk reg->parent chain), but not randomly during in invalidate_dynptr()
778 * parentage chain will still be live (i.e. reg->parent may be in invalidate_dynptr()
779 * non-NULL), while earlier reg->parent was NULL, so we need in invalidate_dynptr()
784 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in invalidate_dynptr()
785 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; in invalidate_dynptr()
797 if (!dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) { in unmark_stack_slots_dynptr()
802 ref_obj_id = state->stack[spi].spilled_ptr.ref_obj_id; in unmark_stack_slots_dynptr()
815 for (i = 1; i < state->allocated_stack / BPF_REG_SIZE; i++) { in unmark_stack_slots_dynptr()
816 if (state->stack[i].spilled_ptr.ref_obj_id != ref_obj_id) in unmark_stack_slots_dynptr()
823 if (state->stack[i].slot_type[0] != STACK_DYNPTR) { in unmark_stack_slots_dynptr()
825 return -EFAULT; in unmark_stack_slots_dynptr()
827 if (state->stack[i].spilled_ptr.dynptr.first_slot) in unmark_stack_slots_dynptr()
839 if (!env->allow_ptr_leaks) in mark_reg_invalid()
857 if (state->stack[spi].slot_type[0] != STACK_DYNPTR) in destroy_if_dynptr_stack_slot()
861 if (!state->stack[spi].spilled_ptr.dynptr.first_slot) in destroy_if_dynptr_stack_slot()
864 if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) { in destroy_if_dynptr_stack_slot()
866 return -EINVAL; in destroy_if_dynptr_stack_slot()
870 mark_stack_slot_scratched(env, spi - 1); in destroy_if_dynptr_stack_slot()
874 state->stack[spi].slot_type[i] = STACK_INVALID; in destroy_if_dynptr_stack_slot()
875 state->stack[spi - 1].slot_type[i] = STACK_INVALID; in destroy_if_dynptr_stack_slot()
878 dynptr_id = state->stack[spi].spilled_ptr.id; in destroy_if_dynptr_stack_slot()
880 bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({ in destroy_if_dynptr_stack_slot()
882 if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM) in destroy_if_dynptr_stack_slot()
884 if (dreg->dynptr_id == dynptr_id) in destroy_if_dynptr_stack_slot()
891 __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); in destroy_if_dynptr_stack_slot()
892 __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); in destroy_if_dynptr_stack_slot()
895 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in destroy_if_dynptr_stack_slot()
896 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; in destroy_if_dynptr_stack_slot()
905 if (reg->type == CONST_PTR_TO_DYNPTR) in is_dynptr_reg_valid_uninit()
910 /* -ERANGE (i.e. spi not falling into allocated stack slots) isn't an in is_dynptr_reg_valid_uninit()
914 if (spi < 0 && spi != -ERANGE) in is_dynptr_reg_valid_uninit()
940 if (reg->type == CONST_PTR_TO_DYNPTR) in is_dynptr_reg_valid_init()
946 if (!state->stack[spi].spilled_ptr.dynptr.first_slot) in is_dynptr_reg_valid_init()
950 if (state->stack[spi].slot_type[i] != STACK_DYNPTR || in is_dynptr_reg_valid_init()
951 state->stack[spi - 1].slot_type[i] != STACK_DYNPTR) in is_dynptr_reg_valid_init()
970 if (reg->type == CONST_PTR_TO_DYNPTR) { in is_dynptr_type_expected()
971 return reg->dynptr.type == dynptr_type; in is_dynptr_type_expected()
976 return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type; in is_dynptr_type_expected()
1003 struct bpf_stack_state *slot = &state->stack[spi - i]; in mark_stack_slots_iter()
1004 struct bpf_reg_state *st = &slot->spilled_ptr; in mark_stack_slots_iter()
1007 st->type = PTR_TO_STACK; /* we don't have dedicated reg type */ in mark_stack_slots_iter()
1010 st->type |= MEM_RCU; in mark_stack_slots_iter()
1012 st->type |= PTR_UNTRUSTED; in mark_stack_slots_iter()
1014 st->live |= REG_LIVE_WRITTEN; in mark_stack_slots_iter()
1015 st->ref_obj_id = i == 0 ? id : 0; in mark_stack_slots_iter()
1016 st->iter.btf = btf; in mark_stack_slots_iter()
1017 st->iter.btf_id = btf_id; in mark_stack_slots_iter()
1018 st->iter.state = BPF_ITER_STATE_ACTIVE; in mark_stack_slots_iter()
1019 st->iter.depth = 0; in mark_stack_slots_iter()
1022 slot->slot_type[j] = STACK_ITER; in mark_stack_slots_iter()
1024 mark_stack_slot_scratched(env, spi - i); in mark_stack_slots_iter()
1041 struct bpf_stack_state *slot = &state->stack[spi - i]; in unmark_stack_slots_iter()
1042 struct bpf_reg_state *st = &slot->spilled_ptr; in unmark_stack_slots_iter()
1045 WARN_ON_ONCE(release_reference(env, st->ref_obj_id)); in unmark_stack_slots_iter()
1050 st->live |= REG_LIVE_WRITTEN; in unmark_stack_slots_iter()
1053 slot->slot_type[j] = STACK_INVALID; in unmark_stack_slots_iter()
1055 mark_stack_slot_scratched(env, spi - i); in unmark_stack_slots_iter()
1067 /* For -ERANGE (i.e. spi not falling into allocated stack slots), we in is_iter_reg_valid_uninit()
1072 if (spi == -ERANGE) in is_iter_reg_valid_uninit()
1078 struct bpf_stack_state *slot = &state->stack[spi - i]; in is_iter_reg_valid_uninit()
1081 if (slot->slot_type[j] == STACK_ITER) in is_iter_reg_valid_uninit()
1096 return -EINVAL; in is_iter_reg_valid_init()
1099 struct bpf_stack_state *slot = &state->stack[spi - i]; in is_iter_reg_valid_init()
1100 struct bpf_reg_state *st = &slot->spilled_ptr; in is_iter_reg_valid_init()
1102 if (st->type & PTR_UNTRUSTED) in is_iter_reg_valid_init()
1103 return -EPROTO; in is_iter_reg_valid_init()
1105 if (i == 0 && !st->ref_obj_id) in is_iter_reg_valid_init()
1106 return -EINVAL; in is_iter_reg_valid_init()
1107 if (i != 0 && st->ref_obj_id) in is_iter_reg_valid_init()
1108 return -EINVAL; in is_iter_reg_valid_init()
1109 if (st->iter.btf != btf || st->iter.btf_id != btf_id) in is_iter_reg_valid_init()
1110 return -EINVAL; in is_iter_reg_valid_init()
1113 if (slot->slot_type[j] != STACK_ITER) in is_iter_reg_valid_init()
1114 return -EINVAL; in is_iter_reg_valid_init()
1121 * - spilled register state (STACK_SPILL);
1122 * - dynptr state (STACK_DYNPTR);
1123 * - iter state (STACK_ITER).
1127 enum bpf_stack_slot_type type = stack->slot_type[BPF_REG_SIZE - 1]; in is_stack_slot_special()
1149 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; in is_spilled_reg()
1154 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL && in is_spilled_scalar_reg()
1155 stack->spilled_ptr.type == SCALAR_VALUE; in is_spilled_scalar_reg()
1162 * env->allow_ptr_leaks into account and force STACK_MISC, if necessary.
1168 if (env->allow_ptr_leaks && *stype == STACK_INVALID) in mark_stack_slot_misc()
1232 memset(arr + old_n * size, 0, (new_n - old_n) * size); in realloc_array()
1240 dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs, in copy_reference_state()
1242 if (!dst->refs) in copy_reference_state()
1243 return -ENOMEM; in copy_reference_state()
1245 dst->acquired_refs = src->acquired_refs; in copy_reference_state()
1251 size_t n = src->allocated_stack / BPF_REG_SIZE; in copy_stack_state()
1253 dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state), in copy_stack_state()
1255 if (!dst->stack) in copy_stack_state()
1256 return -ENOMEM; in copy_stack_state()
1258 dst->allocated_stack = src->allocated_stack; in copy_stack_state()
1264 state->refs = realloc_array(state->refs, state->acquired_refs, n, in resize_reference_state()
1266 if (!state->refs) in resize_reference_state()
1267 return -ENOMEM; in resize_reference_state()
1269 state->acquired_refs = n; in resize_reference_state()
1273 /* Possibly update state->allocated_stack to be at least size bytes. Also
1274 * possibly update the function's high-water mark in its bpf_subprog_info.
1278 size_t old_n = state->allocated_stack / BPF_REG_SIZE, n; in grow_stack_state()
1287 state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state)); in grow_stack_state()
1288 if (!state->stack) in grow_stack_state()
1289 return -ENOMEM; in grow_stack_state()
1291 state->allocated_stack = size; in grow_stack_state()
1294 if (env->subprog_info[state->subprogno].stack_depth < size) in grow_stack_state()
1295 env->subprog_info[state->subprogno].stack_depth = size; in grow_stack_state()
1300 /* Acquire a pointer id from the env and update the state->refs to include
1308 int new_ofs = state->acquired_refs; in acquire_reference_state()
1311 err = resize_reference_state(state, state->acquired_refs + 1); in acquire_reference_state()
1314 id = ++env->id_gen; in acquire_reference_state()
1315 state->refs[new_ofs].id = id; in acquire_reference_state()
1316 state->refs[new_ofs].insn_idx = insn_idx; in acquire_reference_state()
1317 state->refs[new_ofs].callback_ref = state->in_callback_fn ? state->frameno : 0; in acquire_reference_state()
1327 last_idx = state->acquired_refs - 1; in release_reference_state()
1328 for (i = 0; i < state->acquired_refs; i++) { in release_reference_state()
1329 if (state->refs[i].id == ptr_id) { in release_reference_state()
1331 if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno) in release_reference_state()
1332 return -EINVAL; in release_reference_state()
1334 memcpy(&state->refs[i], &state->refs[last_idx], in release_reference_state()
1335 sizeof(*state->refs)); in release_reference_state()
1336 memset(&state->refs[last_idx], 0, sizeof(*state->refs)); in release_reference_state()
1337 state->acquired_refs--; in release_reference_state()
1341 return -EINVAL; in release_reference_state()
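The release path above is the usual swap-with-last removal from an unordered array; a standalone sketch with invented field names shows the idiom:

	#include <stdio.h>
	#include <string.h>

	struct ref { int id; int insn_idx; };

	/* copy the last entry over the released slot, clear the tail, shrink */
	static int release_ref(struct ref *refs, int *cnt, int id)
	{
		int last = *cnt - 1, i;

		for (i = 0; i < *cnt; i++) {
			if (refs[i].id != id)
				continue;
			memcpy(&refs[i], &refs[last], sizeof(*refs));
			memset(&refs[last], 0, sizeof(*refs));
			(*cnt)--;
			return 0;
		}
		return -1;	/* releasing a reference that was never acquired */
	}

	int main(void)
	{
		struct ref refs[4] = { {1, 10}, {2, 20}, {3, 30} };
		int cnt = 3;

		release_ref(refs, &cnt, 2);
		printf("%d %d %d\n", cnt, refs[0].id, refs[1].id);	/* 2 1 3 */
		return 0;
	}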
1348 kfree(state->refs); in free_func_state()
1349 kfree(state->stack); in free_func_state()
1355 kfree(state->jmp_history); in clear_jmp_history()
1356 state->jmp_history = NULL; in clear_jmp_history()
1357 state->jmp_history_cnt = 0; in clear_jmp_history()
1365 for (i = 0; i <= state->curframe; i++) { in free_verifier_state()
1366 free_func_state(state->frame[i]); in free_verifier_state()
1367 state->frame[i] = NULL; in free_verifier_state()
1395 dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history, in copy_verifier_state()
1396 src->jmp_history_cnt, sizeof(*dst_state->jmp_history), in copy_verifier_state()
1398 if (!dst_state->jmp_history) in copy_verifier_state()
1399 return -ENOMEM; in copy_verifier_state()
1400 dst_state->jmp_history_cnt = src->jmp_history_cnt; in copy_verifier_state()
1405 for (i = src->curframe + 1; i <= dst_state->curframe; i++) { in copy_verifier_state()
1406 free_func_state(dst_state->frame[i]); in copy_verifier_state()
1407 dst_state->frame[i] = NULL; in copy_verifier_state()
1409 dst_state->speculative = src->speculative; in copy_verifier_state()
1410 dst_state->active_rcu_lock = src->active_rcu_lock; in copy_verifier_state()
1411 dst_state->curframe = src->curframe; in copy_verifier_state()
1412 dst_state->active_lock.ptr = src->active_lock.ptr; in copy_verifier_state()
1413 dst_state->active_lock.id = src->active_lock.id; in copy_verifier_state()
1414 dst_state->branches = src->branches; in copy_verifier_state()
1415 dst_state->parent = src->parent; in copy_verifier_state()
1416 dst_state->first_insn_idx = src->first_insn_idx; in copy_verifier_state()
1417 dst_state->last_insn_idx = src->last_insn_idx; in copy_verifier_state()
1418 dst_state->dfs_depth = src->dfs_depth; in copy_verifier_state()
1419 dst_state->callback_unroll_depth = src->callback_unroll_depth; in copy_verifier_state()
1420 dst_state->used_as_loop_entry = src->used_as_loop_entry; in copy_verifier_state()
1421 for (i = 0; i <= src->curframe; i++) { in copy_verifier_state()
1422 dst = dst_state->frame[i]; in copy_verifier_state()
1426 return -ENOMEM; in copy_verifier_state()
1427 dst_state->frame[i] = dst; in copy_verifier_state()
1429 err = copy_func_state(dst, src->frame[i]); in copy_verifier_state()
1438 return env->prog->len; in state_htab_size()
1443 struct bpf_verifier_state *cur = env->cur_state; in explored_state()
1444 struct bpf_func_state *state = cur->frame[cur->curframe]; in explored_state()
1446 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; in explored_state()
1453 if (a->curframe != b->curframe) in same_callsites()
1456 for (fr = a->curframe; fr >= 0; fr--) in same_callsites()
1457 if (a->frame[fr]->callsite != b->frame[fr]->callsite) in same_callsites()
1463 /* Open coded iterators allow back-edges in the state graph in order to
1467 * part of some loops in order to decide whether non-exact states
1469 * - non-exact states comparison establishes sub-state relation and uses
1472 * - exact states comparison just checks if current and explored states
1473 * are identical (and thus form a back-edge).
1497 * ... ... .---------> hdr
1500 * cur .-> succ | .------...
1503 * succ '-- cur | ... ...
1506 * | succ <- cur
1511 * '----'
1523 * .------... .------...
1526 * .-> hdr ... ... ...
1529 * | succ <- cur succ <- cur
1534 * '----' exit
1578 * - use st->branches == 0 as a signal that DFS of succ had been finished
1581 * - use st->branches > 0 as a signal that st is in the current DFS path;
1582 * - handle cases B and C in is_state_visited();
1583 * - update topmost loop entry for intermediate states in get_loop_entry().
1587 struct bpf_verifier_state *topmost = st->loop_entry, *old; in get_loop_entry()
1589 while (topmost && topmost->loop_entry && topmost != topmost->loop_entry) in get_loop_entry()
1590 topmost = topmost->loop_entry; in get_loop_entry()
1594 while (st && st->loop_entry != topmost) { in get_loop_entry()
1595 old = st->loop_entry; in get_loop_entry()
1596 st->loop_entry = topmost; in get_loop_entry()
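The two loops above amount to path compression, as in a union-find structure: first chase loop_entry links to the topmost header, then repoint every intermediate state straight at it so later lookups are cheap. A stripped-down sketch:

	#include <stdio.h>

	struct state { struct state *loop_entry; };

	static struct state *get_loop_entry(struct state *st)
	{
		struct state *topmost = st->loop_entry;

		while (topmost && topmost->loop_entry && topmost != topmost->loop_entry)
			topmost = topmost->loop_entry;
		while (st && st->loop_entry != topmost) {	/* compress the path */
			struct state *next = st->loop_entry;

			st->loop_entry = topmost;
			st = next;
		}
		return topmost;
	}

	int main(void)
	{
		struct state a, b, c;

		c.loop_entry = &c;	/* c is its own loop header */
		b.loop_entry = &c;
		a.loop_entry = &b;
		printf("%d %d\n", get_loop_entry(&a) == &c, a.loop_entry == &c);	/* 1 1 */
		return 0;
	}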
1608 /* The hdr1->branches check decides between cases B and C in in update_loop_entry()
1609 * comment for get_loop_entry(). If hdr1->branches == 0 then in update_loop_entry()
1612 * no need to update cur->loop_entry. in update_loop_entry()
1614 if (hdr1->branches && hdr1->dfs_depth <= cur1->dfs_depth) { in update_loop_entry()
1615 cur->loop_entry = hdr; in update_loop_entry()
1616 hdr->used_as_loop_entry = true; in update_loop_entry()
1623 u32 br = --st->branches; in update_branch_counts()
1630 if (br == 0 && st->parent && st->loop_entry) in update_branch_counts()
1631 update_loop_entry(st->parent, st->loop_entry); in update_branch_counts()
1641 st = st->parent; in update_branch_counts()
1648 struct bpf_verifier_state *cur = env->cur_state; in pop_stack()
1649 struct bpf_verifier_stack_elem *elem, *head = env->head; in pop_stack()
1652 if (env->head == NULL) in pop_stack()
1653 return -ENOENT; in pop_stack()
1656 err = copy_verifier_state(cur, &head->st); in pop_stack()
1661 bpf_vlog_reset(&env->log, head->log_pos); in pop_stack()
1663 *insn_idx = head->insn_idx; in pop_stack()
1665 *prev_insn_idx = head->prev_insn_idx; in pop_stack()
1666 elem = head->next; in pop_stack()
1667 free_verifier_state(&head->st, false); in pop_stack()
1669 env->head = elem; in pop_stack()
1670 env->stack_size--; in pop_stack()
1678 struct bpf_verifier_state *cur = env->cur_state; in push_stack()
1686 elem->insn_idx = insn_idx; in push_stack()
1687 elem->prev_insn_idx = prev_insn_idx; in push_stack()
1688 elem->next = env->head; in push_stack()
1689 elem->log_pos = env->log.end_pos; in push_stack()
1690 env->head = elem; in push_stack()
1691 env->stack_size++; in push_stack()
1692 err = copy_verifier_state(&elem->st, cur); in push_stack()
1695 elem->st.speculative |= speculative; in push_stack()
1696 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { in push_stack()
1698 env->stack_size); in push_stack()
1701 if (elem->st.parent) { in push_stack()
1702 ++elem->st.parent->branches; in push_stack()
1705 * 1. speculative states will bump 'branches' for non-branch in push_stack()
1713 return &elem->st; in push_stack()
1715 free_verifier_state(env->cur_state, true); in push_stack()
1716 env->cur_state = NULL; in push_stack()
1727 /* This helper doesn't clear reg->id */
1730 reg->var_off = tnum_const(imm); in ___mark_reg_known()
1731 reg->smin_value = (s64)imm; in ___mark_reg_known()
1732 reg->smax_value = (s64)imm; in ___mark_reg_known()
1733 reg->umin_value = imm; in ___mark_reg_known()
1734 reg->umax_value = imm; in ___mark_reg_known()
1736 reg->s32_min_value = (s32)imm; in ___mark_reg_known()
1737 reg->s32_max_value = (s32)imm; in ___mark_reg_known()
1738 reg->u32_min_value = (u32)imm; in ___mark_reg_known()
1739 reg->u32_max_value = (u32)imm; in ___mark_reg_known()
1748 memset(((u8 *)reg) + sizeof(reg->type), 0, in __mark_reg_known()
1749 offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type)); in __mark_reg_known()
1750 reg->id = 0; in __mark_reg_known()
1751 reg->ref_obj_id = 0; in __mark_reg_known()
1757 reg->var_off = tnum_const_subreg(reg->var_off, imm); in __mark_reg32_known()
1758 reg->s32_min_value = (s32)imm; in __mark_reg32_known()
1759 reg->s32_max_value = (s32)imm; in __mark_reg32_known()
1760 reg->u32_min_value = (u32)imm; in __mark_reg32_known()
1761 reg->u32_max_value = (u32)imm; in __mark_reg32_known()
1775 reg->type = SCALAR_VALUE; in __mark_reg_const_zero()
1779 reg->precise = !env->bpf_capable; in __mark_reg_const_zero()
1798 /* reg->type has no meaning for STACK_DYNPTR, but when we set reg for in __mark_dynptr_reg()
1803 reg->type = CONST_PTR_TO_DYNPTR; in __mark_dynptr_reg()
1805 reg->id = dynptr_id; in __mark_dynptr_reg()
1806 reg->dynptr.type = type; in __mark_dynptr_reg()
1807 reg->dynptr.first_slot = first_slot; in __mark_dynptr_reg()
1812 if (base_type(reg->type) == PTR_TO_MAP_VALUE) { in mark_ptr_not_null_reg()
1813 const struct bpf_map *map = reg->map_ptr; in mark_ptr_not_null_reg()
1815 if (map->inner_map_meta) { in mark_ptr_not_null_reg()
1816 reg->type = CONST_PTR_TO_MAP; in mark_ptr_not_null_reg()
1817 reg->map_ptr = map->inner_map_meta; in mark_ptr_not_null_reg()
1821 if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER)) in mark_ptr_not_null_reg()
1822 reg->map_uid = reg->id; in mark_ptr_not_null_reg()
1823 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { in mark_ptr_not_null_reg()
1824 reg->type = PTR_TO_XDP_SOCK; in mark_ptr_not_null_reg()
1825 } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || in mark_ptr_not_null_reg()
1826 map->map_type == BPF_MAP_TYPE_SOCKHASH) { in mark_ptr_not_null_reg()
1827 reg->type = PTR_TO_SOCKET; in mark_ptr_not_null_reg()
1829 reg->type = PTR_TO_MAP_VALUE; in mark_ptr_not_null_reg()
1834 reg->type &= ~PTR_MAYBE_NULL; in mark_ptr_not_null_reg()
1842 regs[regno].btf = ds_head->btf; in mark_reg_graph_node()
1843 regs[regno].btf_id = ds_head->value_btf_id; in mark_reg_graph_node()
1844 regs[regno].off = ds_head->node_offset; in mark_reg_graph_node()
1849 return type_is_pkt_pointer(reg->type); in reg_is_pkt_pointer()
1855 reg->type == PTR_TO_PACKET_END; in reg_is_pkt_pointer_any()
1860 return base_type(reg->type) == PTR_TO_MEM && in reg_is_dynptr_slice_pkt()
1861 (reg->type & DYNPTR_TYPE_SKB || reg->type & DYNPTR_TYPE_XDP); in reg_is_dynptr_slice_pkt()
1872 return reg->type == which && in reg_is_init_pkt_pointer()
1873 reg->id == 0 && in reg_is_init_pkt_pointer()
1874 reg->off == 0 && in reg_is_init_pkt_pointer()
1875 tnum_equals_const(reg->var_off, 0); in reg_is_init_pkt_pointer()
1881 reg->smin_value = S64_MIN; in __mark_reg_unbounded()
1882 reg->smax_value = S64_MAX; in __mark_reg_unbounded()
1883 reg->umin_value = 0; in __mark_reg_unbounded()
1884 reg->umax_value = U64_MAX; in __mark_reg_unbounded()
1886 reg->s32_min_value = S32_MIN; in __mark_reg_unbounded()
1887 reg->s32_max_value = S32_MAX; in __mark_reg_unbounded()
1888 reg->u32_min_value = 0; in __mark_reg_unbounded()
1889 reg->u32_max_value = U32_MAX; in __mark_reg_unbounded()
1894 reg->smin_value = S64_MIN; in __mark_reg64_unbounded()
1895 reg->smax_value = S64_MAX; in __mark_reg64_unbounded()
1896 reg->umin_value = 0; in __mark_reg64_unbounded()
1897 reg->umax_value = U64_MAX; in __mark_reg64_unbounded()
1902 reg->s32_min_value = S32_MIN; in __mark_reg32_unbounded()
1903 reg->s32_max_value = S32_MAX; in __mark_reg32_unbounded()
1904 reg->u32_min_value = 0; in __mark_reg32_unbounded()
1905 reg->u32_max_value = U32_MAX; in __mark_reg32_unbounded()
1910 struct tnum var32_off = tnum_subreg(reg->var_off); in __update_reg32_bounds()
1913 reg->s32_min_value = max_t(s32, reg->s32_min_value, in __update_reg32_bounds()
1916 reg->s32_max_value = min_t(s32, reg->s32_max_value, in __update_reg32_bounds()
1918 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value); in __update_reg32_bounds()
1919 reg->u32_max_value = min(reg->u32_max_value, in __update_reg32_bounds()
1926 reg->smin_value = max_t(s64, reg->smin_value, in __update_reg64_bounds()
1927 reg->var_off.value | (reg->var_off.mask & S64_MIN)); in __update_reg64_bounds()
1929 reg->smax_value = min_t(s64, reg->smax_value, in __update_reg64_bounds()
1930 reg->var_off.value | (reg->var_off.mask & S64_MAX)); in __update_reg64_bounds()
1931 reg->umin_value = max(reg->umin_value, reg->var_off.value); in __update_reg64_bounds()
1932 reg->umax_value = min(reg->umax_value, in __update_reg64_bounds()
1933 reg->var_off.value | reg->var_off.mask); in __update_reg64_bounds()
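The var_off terms above fall straight out of the tnum representation (known bits live in value, unknown bits are set in mask): all-zero unknown bits give the unsigned minimum, all-one unknown bits give the unsigned maximum, and the signed bounds only differ when the sign bit itself is unknown. A worked sketch with a made-up tnum covering 0x10..0x1f:

	#include <stdint.h>
	#include <stdio.h>

	struct tnum { uint64_t value; uint64_t mask; };	/* mask bits = unknown */

	int main(void)
	{
		struct tnum t = { .value = 0x10, .mask = 0x0f };	/* low nibble unknown */
		uint64_t umin = t.value;		/* unknown bits all 0 */
		uint64_t umax = t.value | t.mask;	/* unknown bits all 1 */
		int64_t smin = (int64_t)(t.value | (t.mask & (1ULL << 63)));	/* sign bit unknown -> set */
		int64_t smax = (int64_t)(t.value | (t.mask & INT64_MAX));	/* sign bit unknown -> clear */

		printf("u:[%llu,%llu] s:[%lld,%lld]\n",
		       (unsigned long long)umin, (unsigned long long)umax,
		       (long long)smin, (long long)smax);	/* u:[16,31] s:[16,31] */
		return 0;
	}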
1942 /* Uses signed min/max values to inform unsigned, and vice-versa */
1950 * [10, 20] range. But this property holds for any 64-bit range as in __reg32_deduce_bounds()
1961 * depends on actual hexadecimal values of 32-bit range. They can form in __reg32_deduce_bounds()
1966 if ((reg->umin_value >> 32) == (reg->umax_value >> 32)) { in __reg32_deduce_bounds()
1970 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->umin_value); in __reg32_deduce_bounds()
1971 reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->umax_value); in __reg32_deduce_bounds()
1973 if ((s32)reg->umin_value <= (s32)reg->umax_value) { in __reg32_deduce_bounds()
1974 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value); in __reg32_deduce_bounds()
1975 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value); in __reg32_deduce_bounds()
1978 if ((reg->smin_value >> 32) == (reg->smax_value >> 32)) { in __reg32_deduce_bounds()
1980 if ((u32)reg->smin_value <= (u32)reg->smax_value) { in __reg32_deduce_bounds()
1981 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->smin_value); in __reg32_deduce_bounds()
1982 reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->smax_value); in __reg32_deduce_bounds()
1985 if ((s32)reg->smin_value <= (s32)reg->smax_value) { in __reg32_deduce_bounds()
1986 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value); in __reg32_deduce_bounds()
1987 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value); in __reg32_deduce_bounds()
1991 * sequential numbers (in 32-bit unsigned space, so 0xffffffff to in __reg32_deduce_bounds()
1994 * have s64 range [-1, 1] ([0xffffffffffffffff, 0x0000000000000001]). in __reg32_deduce_bounds()
1995 * Possible s64 values are {-1, 0, 1} ({0xffffffffffffffff, in __reg32_deduce_bounds()
1997 * we still get a valid s32 range [-1, 1] ([0xffffffff, 0x00000001]). in __reg32_deduce_bounds()
2001 * [-16, 16] ([0xfffffff0; 0x00000010]) in its 32 bit subregister. in __reg32_deduce_bounds()
2003 if ((u32)(reg->umin_value >> 32) + 1 == (u32)(reg->umax_value >> 32) && in __reg32_deduce_bounds()
2004 (s32)reg->umin_value < 0 && (s32)reg->umax_value >= 0) { in __reg32_deduce_bounds()
2005 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value); in __reg32_deduce_bounds()
2006 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value); in __reg32_deduce_bounds()
2008 if ((u32)(reg->smin_value >> 32) + 1 == (u32)(reg->smax_value >> 32) && in __reg32_deduce_bounds()
2009 (s32)reg->smin_value < 0 && (s32)reg->smax_value >= 0) { in __reg32_deduce_bounds()
2010 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value); in __reg32_deduce_bounds()
2011 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value); in __reg32_deduce_bounds()
2016 if ((s32)reg->u32_min_value <= (s32)reg->u32_max_value) { in __reg32_deduce_bounds()
2017 reg->s32_min_value = max_t(s32, reg->s32_min_value, reg->u32_min_value); in __reg32_deduce_bounds()
2018 reg->s32_max_value = min_t(s32, reg->s32_max_value, reg->u32_max_value); in __reg32_deduce_bounds()
2022 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. in __reg32_deduce_bounds()
2024 if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) { in __reg32_deduce_bounds()
2025 reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value); in __reg32_deduce_bounds()
2026 reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value); in __reg32_deduce_bounds()
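The first deduction above can be replayed with concrete numbers: when the upper 32 bits of umin and umax agree, the low halves cannot wrap, so they already form a valid u32 range, and if they also avoid the s32 sign boundary they form a valid s32 range too. A sketch with made-up bounds:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t umin = 0x100000005ULL, umax = 0x100000009ULL;

		if ((umin >> 32) == (umax >> 32))	/* upper halves agree */
			printf("u32: [%u, %u]\n", (uint32_t)umin, (uint32_t)umax);	/* [5, 9] */
		if ((int32_t)umin <= (int32_t)umax)	/* no s32 sign-boundary crossing */
			printf("s32: [%d, %d]\n", (int32_t)umin, (int32_t)umax);	/* [5, 9] */
		return 0;
	}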
2037 * |-------------------------------|--------------------------------| in __reg64_deduce_bounds()
2045 * |-------------------------------|--------------------------------| in __reg64_deduce_bounds()
2046 * 0 S64_MAX S64_MIN -1 in __reg64_deduce_bounds()
2049 * contiguous to the right of it, wrapping around from -1 to 0, and in __reg64_deduce_bounds()
2052 * more visually as mapped to sign-agnostic range of hex values. in __reg64_deduce_bounds()
2058 * |-------------------------------|--------------------------------| in __reg64_deduce_bounds()
2059 * 0 S64_MAX S64_MIN -1 in __reg64_deduce_bounds()
2061 * >------------------------------ -------------------------------> in __reg64_deduce_bounds()
2071 * |-------------------------------|--------------------------------| in __reg64_deduce_bounds()
2077 * will be non-negative both as u64 and s64 (and in fact it will be in __reg64_deduce_bounds()
2080 * non-negative range of values larger than 0x8000000000000000. in __reg64_deduce_bounds()
2099 if ((s64)reg->umin_value <= (s64)reg->umax_value) { in __reg64_deduce_bounds()
2100 reg->smin_value = max_t(s64, reg->smin_value, reg->umin_value); in __reg64_deduce_bounds()
2101 reg->smax_value = min_t(s64, reg->smax_value, reg->umax_value); in __reg64_deduce_bounds()
2105 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. in __reg64_deduce_bounds()
2107 if ((u64)reg->smin_value <= (u64)reg->smax_value) { in __reg64_deduce_bounds()
2108 reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); in __reg64_deduce_bounds()
2109 reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); in __reg64_deduce_bounds()
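The 64-bit case above relies on the sign-agnostic picture the comment draws: if the unsigned interval never crosses the s64 sign boundary, the very same pair of numbers also bounds the value as a signed interval, and vice versa. A short sketch with made-up bounds:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t umin = 5, umax = 10;	/* entirely in the non-negative half */
		int64_t smin = -3, smax = -1;	/* entirely in the negative half */

		if ((int64_t)umin <= (int64_t)umax)	/* no sign-boundary crossing */
			printf("s64: [%lld, %lld]\n", (long long)umin, (long long)umax);	/* [5, 10] */
		if ((uint64_t)smin <= (uint64_t)smax)	/* -3..-1 is 0xf..fd..0xf..ff as u64 */
			printf("u64: [%llu, %llu]\n",
			       (unsigned long long)smin, (unsigned long long)smax);
		return 0;
	}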
2115 /* Try to tighten 64-bit bounds from 32-bit knowledge, using 32-bit in __reg_deduce_mixed_bounds()
2116 * values on both sides of the 64-bit range in the hope of getting a tighter range. in __reg_deduce_mixed_bounds()
2118 * 32-bit signed > 0 operation that s32 bounds are now [1; 0x7fffffff]. in __reg_deduce_mixed_bounds()
2119 * With this, we can substitute 1 as low 32-bits of _low_ 64-bit bound in __reg_deduce_mixed_bounds()
2120 * (0x100000000 -> 0x100000001) and 0x7fffffff as low 32-bits of in __reg_deduce_mixed_bounds()
2121 * _high_ 64-bit bound (0x380000000 -> 0x37fffffff) and arrive at a in __reg_deduce_mixed_bounds()
2124 * with are well-formed ranges in the respective s64 or u64 domain, just in __reg_deduce_mixed_bounds()
2125 * like we do with similar kinds of 32-to-64 or 64-to-32 adjustments. in __reg_deduce_mixed_bounds()
2130 /* u32 -> u64 tightening, it's always well-formed */ in __reg_deduce_mixed_bounds()
2131 new_umin = (reg->umin_value & ~0xffffffffULL) | reg->u32_min_value; in __reg_deduce_mixed_bounds()
2132 new_umax = (reg->umax_value & ~0xffffffffULL) | reg->u32_max_value; in __reg_deduce_mixed_bounds()
2133 reg->umin_value = max_t(u64, reg->umin_value, new_umin); in __reg_deduce_mixed_bounds()
2134 reg->umax_value = min_t(u64, reg->umax_value, new_umax); in __reg_deduce_mixed_bounds()
2135 /* u32 -> s64 tightening, u32 range embedded into s64 preserves range validity */ in __reg_deduce_mixed_bounds()
2136 new_smin = (reg->smin_value & ~0xffffffffULL) | reg->u32_min_value; in __reg_deduce_mixed_bounds()
2137 new_smax = (reg->smax_value & ~0xffffffffULL) | reg->u32_max_value; in __reg_deduce_mixed_bounds()
2138 reg->smin_value = max_t(s64, reg->smin_value, new_smin); in __reg_deduce_mixed_bounds()
2139 reg->smax_value = min_t(s64, reg->smax_value, new_smax); in __reg_deduce_mixed_bounds()
2142 if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) { in __reg_deduce_mixed_bounds()
2143 /* s32 -> u64 tightening */ in __reg_deduce_mixed_bounds()
2144 new_umin = (reg->umin_value & ~0xffffffffULL) | (u32)reg->s32_min_value; in __reg_deduce_mixed_bounds()
2145 new_umax = (reg->umax_value & ~0xffffffffULL) | (u32)reg->s32_max_value; in __reg_deduce_mixed_bounds()
2146 reg->umin_value = max_t(u64, reg->umin_value, new_umin); in __reg_deduce_mixed_bounds()
2147 reg->umax_value = min_t(u64, reg->umax_value, new_umax); in __reg_deduce_mixed_bounds()
2148 /* s32 -> s64 tightening */ in __reg_deduce_mixed_bounds()
2149 new_smin = (reg->smin_value & ~0xffffffffULL) | (u32)reg->s32_min_value; in __reg_deduce_mixed_bounds()
2150 new_smax = (reg->smax_value & ~0xffffffffULL) | (u32)reg->s32_max_value; in __reg_deduce_mixed_bounds()
2151 reg->smin_value = max_t(s64, reg->smin_value, new_smin); in __reg_deduce_mixed_bounds()
2152 reg->smax_value = min_t(s64, reg->smax_value, new_smax); in __reg_deduce_mixed_bounds()
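Reusing the numbers from the comment above, the splice can be shown directly: 64-bit bounds [0x100000000, 0x380000000] combined with 32-bit bounds [1, 0x7fffffff] tighten to [0x100000001, 0x37fffffff]. A minimal sketch of the s32 -> u64 direction:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t umin = 0x100000000ULL, umax = 0x380000000ULL;
		int32_t s32_min = 1, s32_max = 0x7fffffff;

		/* keep the upper halves, splice the 32-bit bounds into the low halves */
		uint64_t new_umin = (umin & ~0xffffffffULL) | (uint32_t)s32_min;
		uint64_t new_umax = (umax & ~0xffffffffULL) | (uint32_t)s32_max;

		printf("[0x%llx, 0x%llx]\n",
		       (unsigned long long)(new_umin > umin ? new_umin : umin),
		       (unsigned long long)(new_umax < umax ? new_umax : umax));
		/* prints [0x100000001, 0x37fffffff] */
		return 0;
	}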
2166 struct tnum var64_off = tnum_intersect(reg->var_off, in __reg_bound_offset()
2167 tnum_range(reg->umin_value, in __reg_bound_offset()
2168 reg->umax_value)); in __reg_bound_offset()
2170 tnum_range(reg->u32_min_value, in __reg_bound_offset()
2171 reg->u32_max_value)); in __reg_bound_offset()
2173 reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off); in __reg_bound_offset()
2197 if (reg->umin_value > reg->umax_value || in reg_bounds_sanity_check()
2198 reg->smin_value > reg->smax_value || in reg_bounds_sanity_check()
2199 reg->u32_min_value > reg->u32_max_value || in reg_bounds_sanity_check()
2200 reg->s32_min_value > reg->s32_max_value) { in reg_bounds_sanity_check()
2205 if (tnum_is_const(reg->var_off)) { in reg_bounds_sanity_check()
2206 u64 uval = reg->var_off.value; in reg_bounds_sanity_check()
2209 if (reg->umin_value != uval || reg->umax_value != uval || in reg_bounds_sanity_check()
2210 reg->smin_value != sval || reg->smax_value != sval) { in reg_bounds_sanity_check()
2216 if (tnum_subreg_is_const(reg->var_off)) { in reg_bounds_sanity_check()
2217 u32 uval32 = tnum_subreg(reg->var_off).value; in reg_bounds_sanity_check()
2220 if (reg->u32_min_value != uval32 || reg->u32_max_value != uval32 || in reg_bounds_sanity_check()
2221 reg->s32_min_value != sval32 || reg->s32_max_value != sval32) { in reg_bounds_sanity_check()
2231 ctx, msg, reg->umin_value, reg->umax_value, in reg_bounds_sanity_check()
2232 reg->smin_value, reg->smax_value, in reg_bounds_sanity_check()
2233 reg->u32_min_value, reg->u32_max_value, in reg_bounds_sanity_check()
2234 reg->s32_min_value, reg->s32_max_value, in reg_bounds_sanity_check()
2235 reg->var_off.value, reg->var_off.mask); in reg_bounds_sanity_check()
2236 if (env->test_reg_invariants) in reg_bounds_sanity_check()
2237 return -EFAULT; in reg_bounds_sanity_check()
2249 reg->umin_value = reg->u32_min_value; in __reg_assign_32_into_64()
2250 reg->umax_value = reg->u32_max_value; in __reg_assign_32_into_64()
2252 /* Attempt to pull 32-bit signed bounds into 64-bit bounds but must in __reg_assign_32_into_64()
2256 if (__reg32_bound_s64(reg->s32_min_value) && in __reg_assign_32_into_64()
2257 __reg32_bound_s64(reg->s32_max_value)) { in __reg_assign_32_into_64()
2258 reg->smin_value = reg->s32_min_value; in __reg_assign_32_into_64()
2259 reg->smax_value = reg->s32_max_value; in __reg_assign_32_into_64()
2261 reg->smin_value = 0; in __reg_assign_32_into_64()
2262 reg->smax_value = U32_MAX; in __reg_assign_32_into_64()
2275 reg->type = SCALAR_VALUE; in __mark_reg_unknown()
2276 reg->id = 0; in __mark_reg_unknown()
2277 reg->ref_obj_id = 0; in __mark_reg_unknown()
2278 reg->var_off = tnum_unknown; in __mark_reg_unknown()
2279 reg->frameno = 0; in __mark_reg_unknown()
2280 reg->precise = !env->bpf_capable; in __mark_reg_unknown()
2301 reg->type = NOT_INIT; in __mark_reg_not_init()
2337 struct bpf_reg_state *regs = state->regs; in init_reg_state()
2350 regs[BPF_REG_FP].frameno = state->frameno; in init_reg_state()
2358 #define BPF_MAIN_FUNC (-1)
2363 state->callsite = callsite; in init_func_state()
2364 state->frameno = frameno; in init_func_state()
2365 state->subprogno = subprogno; in init_func_state()
2366 state->callback_ret_range = retval_range(0, 0); in init_func_state()
2383 elem->insn_idx = insn_idx; in push_async_cb()
2384 elem->prev_insn_idx = prev_insn_idx; in push_async_cb()
2385 elem->next = env->head; in push_async_cb()
2386 elem->log_pos = env->log.end_pos; in push_async_cb()
2387 env->head = elem; in push_async_cb()
2388 env->stack_size++; in push_async_cb()
2389 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { in push_async_cb()
2392 env->stack_size); in push_async_cb()
2400 elem->st.branches = 1; in push_async_cb()
2408 elem->st.frame[0] = frame; in push_async_cb()
2409 return &elem->st; in push_async_cb()
2411 free_verifier_state(env->cur_state, true); in push_async_cb()
2412 env->cur_state = NULL; in push_async_cb()
2427 return ((struct bpf_subprog_info *)a)->start - in cmp_subprogs()
2428 ((struct bpf_subprog_info *)b)->start; in cmp_subprogs()
2435 p = bsearch(&off, env->subprog_info, env->subprog_cnt, in find_subprog()
2436 sizeof(env->subprog_info[0]), cmp_subprogs); in find_subprog()
2438 return -ENOENT; in find_subprog()
2439 return p - env->subprog_info; in find_subprog()
2445 int insn_cnt = env->prog->len; in add_subprog()
2450 return -EINVAL; in add_subprog()
2455 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { in add_subprog()
2457 return -E2BIG; in add_subprog()
2460 env->subprog_info[env->subprog_cnt++].start = off; in add_subprog()
2461 sort(env->subprog_info, env->subprog_cnt, in add_subprog()
2462 sizeof(env->subprog_info[0]), cmp_subprogs, NULL); in add_subprog()
2463 return env->subprog_cnt - 1; in add_subprog()
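Subprogram starts are kept in an array that is re-sorted after every insertion, so find_subprog() can use bsearch() with the same comparator. The same pattern in userspace, with a stripped-down struct:

	#include <stdio.h>
	#include <stdlib.h>

	struct subprog_info { int start; };

	static int cmp_subprogs(const void *a, const void *b)
	{
		return ((const struct subprog_info *)a)->start -
		       ((const struct subprog_info *)b)->start;
	}

	int main(void)
	{
		struct subprog_info subprogs[4] = { {0}, {40}, {10} };
		int cnt = 3;
		struct subprog_info key = { .start = 10 }, *p;

		qsort(subprogs, cnt, sizeof(subprogs[0]), cmp_subprogs);
		p = bsearch(&key, subprogs, cnt, sizeof(subprogs[0]), cmp_subprogs);
		printf("%ld\n", p ? (long)(p - subprogs) : -1L);	/* 1 */
		return 0;
	}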
2468 struct bpf_prog_aux *aux = env->prog->aux; in bpf_find_exception_callback_insn_off()
2469 struct btf *btf = aux->btf; in bpf_find_exception_callback_insn_off()
2475 /* Non-zero func_info_cnt implies valid btf */ in bpf_find_exception_callback_insn_off()
2476 if (!aux->func_info_cnt) in bpf_find_exception_callback_insn_off()
2478 main_btf_id = aux->func_info[0].type_id; in bpf_find_exception_callback_insn_off()
2483 return -EINVAL; in bpf_find_exception_callback_insn_off()
2486 name = btf_find_decl_tag_value(btf, t, -1, "exception_callback:"); in bpf_find_exception_callback_insn_off()
2490 if (ret == -ENOENT) in bpf_find_exception_callback_insn_off()
2492 else if (ret == -EEXIST) in bpf_find_exception_callback_insn_off()
2506 return -EINVAL; in bpf_find_exception_callback_insn_off()
2509 for (i = 0; i < aux->func_info_cnt; i++) { in bpf_find_exception_callback_insn_off()
2510 if (aux->func_info[i].type_id != id) in bpf_find_exception_callback_insn_off()
2512 ret = aux->func_info[i].insn_off; in bpf_find_exception_callback_insn_off()
2518 ret = -EINVAL; in bpf_find_exception_callback_insn_off()
2523 ret = -EINVAL; in bpf_find_exception_callback_insn_off()
2566 return d0->func_id - d1->func_id ?: d0->offset - d1->offset; in kfunc_desc_cmp_by_id_off()
2574 return d0->offset - d1->offset; in kfunc_btf_cmp_by_off()
2586 tab = prog->aux->kfunc_tab; in find_kfunc_desc()
2587 return bsearch(&desc, tab->descs, tab->nr_descs, in find_kfunc_desc()
2588 sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off); in find_kfunc_desc()
2598 return -EFAULT; in bpf_get_kfunc_addr()
2600 *func_addr = (u8 *)desc->addr; in bpf_get_kfunc_addr()
2614 tab = env->prog->aux->kfunc_btf_tab; in __find_kfunc_desc_btf()
2615 b = bsearch(&kf_btf, tab->descs, tab->nr_descs, in __find_kfunc_desc_btf()
2616 sizeof(tab->descs[0]), kfunc_btf_cmp_by_off); in __find_kfunc_desc_btf()
2618 if (tab->nr_descs == MAX_KFUNC_BTFS) { in __find_kfunc_desc_btf()
2620 return ERR_PTR(-E2BIG); in __find_kfunc_desc_btf()
2623 if (bpfptr_is_null(env->fd_array)) { in __find_kfunc_desc_btf()
2625 return ERR_PTR(-EPROTO); in __find_kfunc_desc_btf()
2628 if (copy_from_bpfptr_offset(&btf_fd, env->fd_array, in __find_kfunc_desc_btf()
2631 return ERR_PTR(-EFAULT); in __find_kfunc_desc_btf()
2642 return ERR_PTR(-EINVAL); in __find_kfunc_desc_btf()
2648 return ERR_PTR(-ENXIO); in __find_kfunc_desc_btf()
2651 b = &tab->descs[tab->nr_descs++]; in __find_kfunc_desc_btf()
2652 b->btf = btf; in __find_kfunc_desc_btf()
2653 b->module = mod; in __find_kfunc_desc_btf()
2654 b->offset = offset; in __find_kfunc_desc_btf()
2656 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), in __find_kfunc_desc_btf()
2659 return b->btf; in __find_kfunc_desc_btf()
2667 while (tab->nr_descs--) { in bpf_free_kfunc_btf_tab()
2668 module_put(tab->descs[tab->nr_descs].module); in bpf_free_kfunc_btf_tab()
2669 btf_put(tab->descs[tab->nr_descs].btf); in bpf_free_kfunc_btf_tab()
2682 return ERR_PTR(-EINVAL); in find_kfunc_desc_btf()
2687 return btf_vmlinux ?: ERR_PTR(-ENOENT); in find_kfunc_desc_btf()
2703 prog_aux = env->prog->aux; in add_kfunc_call()
2704 tab = prog_aux->kfunc_tab; in add_kfunc_call()
2705 btf_tab = prog_aux->kfunc_btf_tab; in add_kfunc_call()
2709 return -ENOTSUPP; in add_kfunc_call()
2712 if (!env->prog->jit_requested) { in add_kfunc_call()
2714 return -ENOTSUPP; in add_kfunc_call()
2719 return -ENOTSUPP; in add_kfunc_call()
2722 if (!env->prog->gpl_compatible) { in add_kfunc_call()
2723 verbose(env, "cannot call kernel function from non-GPL compatible program\n"); in add_kfunc_call()
2724 return -EINVAL; in add_kfunc_call()
2729 return -ENOMEM; in add_kfunc_call()
2730 prog_aux->kfunc_tab = tab; in add_kfunc_call()
2745 return -ENOMEM; in add_kfunc_call()
2746 prog_aux->kfunc_btf_tab = btf_tab; in add_kfunc_call()
2755 if (find_kfunc_desc(env->prog, func_id, offset)) in add_kfunc_call()
2758 if (tab->nr_descs == MAX_KFUNC_DESCS) { in add_kfunc_call()
2760 return -E2BIG; in add_kfunc_call()
2767 return -EINVAL; in add_kfunc_call()
2769 func_proto = btf_type_by_id(desc_btf, func->type); in add_kfunc_call()
2773 return -EINVAL; in add_kfunc_call()
2776 func_name = btf_name_by_offset(desc_btf, func->name_off); in add_kfunc_call()
2781 return -EINVAL; in add_kfunc_call()
2789 /* Check whether the relative offset overflows desc->imm */ in add_kfunc_call()
2793 return -EINVAL; in add_kfunc_call()
2798 err = bpf_dev_bound_kfunc_check(&env->log, prog_aux); in add_kfunc_call()
2803 desc = &tab->descs[tab->nr_descs++]; in add_kfunc_call()
2804 desc->func_id = func_id; in add_kfunc_call()
2805 desc->imm = call_imm; in add_kfunc_call()
2806 desc->offset = offset; in add_kfunc_call()
2807 desc->addr = addr; in add_kfunc_call()
2808 err = btf_distill_func_proto(&env->log, desc_btf, in add_kfunc_call()
2810 &desc->func_model); in add_kfunc_call()
2812 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), in add_kfunc_call()
2822 if (d0->imm != d1->imm) in kfunc_desc_cmp_by_imm_off()
2823 return d0->imm < d1->imm ? -1 : 1; in kfunc_desc_cmp_by_imm_off()
2824 if (d0->offset != d1->offset) in kfunc_desc_cmp_by_imm_off()
2825 return d0->offset < d1->offset ? -1 : 1; in kfunc_desc_cmp_by_imm_off()
2833 tab = prog->aux->kfunc_tab; in sort_kfunc_descs_by_imm_off()
2837 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), in sort_kfunc_descs_by_imm_off()
2843 return !!prog->aux->kfunc_tab; in bpf_prog_has_kfunc_call()
2851 .imm = insn->imm, in bpf_jit_find_kfunc_model()
2852 .offset = insn->off, in bpf_jit_find_kfunc_model()
2857 tab = prog->aux->kfunc_tab; in bpf_jit_find_kfunc_model()
2858 res = bsearch(&desc, tab->descs, tab->nr_descs, in bpf_jit_find_kfunc_model()
2859 sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm_off); in bpf_jit_find_kfunc_model()
2861 return res ? &res->func_model : NULL; in bpf_jit_find_kfunc_model()
2866 struct bpf_subprog_info *subprog = env->subprog_info; in add_subprog_and_kfunc()
2867 int i, ret, insn_cnt = env->prog->len, ex_cb_insn; in add_subprog_and_kfunc()
2868 struct bpf_insn *insn = env->prog->insnsi; in add_subprog_and_kfunc()
2880 if (!env->bpf_capable) { in add_subprog_and_kfunc()
2882 return -EPERM; in add_subprog_and_kfunc()
2886 ret = add_subprog(env, i + insn->imm + 1); in add_subprog_and_kfunc()
2888 ret = add_kfunc_call(env, insn->imm, insn->off); in add_subprog_and_kfunc()
2906 for (i = 1; i < env->subprog_cnt; i++) { in add_subprog_and_kfunc()
2907 if (env->subprog_info[i].start != ex_cb_insn) in add_subprog_and_kfunc()
2909 env->exception_callback_subprog = i; in add_subprog_and_kfunc()
2918 subprog[env->subprog_cnt].start = insn_cnt; in add_subprog_and_kfunc()
2920 if (env->log.level & BPF_LOG_LEVEL2) in add_subprog_and_kfunc()
2921 for (i = 0; i < env->subprog_cnt; i++) in add_subprog_and_kfunc()
2930 struct bpf_subprog_info *subprog = env->subprog_info; in check_subprogs()
2931 struct bpf_insn *insn = env->prog->insnsi; in check_subprogs()
2932 int insn_cnt = env->prog->len; in check_subprogs()
2957 return -EINVAL; in check_subprogs()
2960 if (i == subprog_end - 1) { in check_subprogs()
2961 /* to avoid fall-through from one subprog into another in check_subprogs()
2969 return -EINVAL; in check_subprogs()
2973 if (cur_subprog < env->subprog_cnt) in check_subprogs()
2981 * issues like callee-saved registers, stack slot allocation time, etc.
2987 bool writes = parent == state->parent; /* Observe write marks */ in mark_reg_read()
2992 if (writes && state->live & REG_LIVE_WRITTEN) in mark_reg_read()
2994 if (parent->live & REG_LIVE_DONE) { in mark_reg_read()
2996 reg_type_str(env, parent->type), in mark_reg_read()
2997 parent->var_off.value, parent->off); in mark_reg_read()
2998 return -EFAULT; in mark_reg_read()
3003 if ((parent->live & REG_LIVE_READ) == flag || in mark_reg_read()
3004 parent->live & REG_LIVE_READ64) in mark_reg_read()
3008 * keep re-marking all parents as LIVE_READ. in mark_reg_read()
3010 * multiple times without writes into it in-between. in mark_reg_read()
3016 parent->live |= flag; in mark_reg_read()
3019 parent->live &= ~REG_LIVE_READ32; in mark_reg_read()
3021 parent = state->parent; in mark_reg_read()
3026 if (env->longest_mark_read_walk < cnt) in mark_reg_read()
3027 env->longest_mark_read_walk = cnt; in mark_reg_read()
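The walk above propagates a read mark up the chain of parent (checkpointed) states until it hits a state that wrote the register, since that write screens the read off from older values. A simplified sketch with invented flag names:

	#include <stdio.h>

	enum { LIVE_READ = 1, LIVE_WRITTEN = 2 };

	struct reg_state {
		int live;
		struct reg_state *parent;
	};

	static void mark_reg_read(struct reg_state *state, struct reg_state *parent)
	{
		while (parent) {
			if (state->live & LIVE_WRITTEN)
				break;		/* value was (re)defined in this state */
			if (parent->live & LIVE_READ)
				break;		/* already propagated on an earlier walk */
			parent->live |= LIVE_READ;
			state = parent;
			parent = state->parent;
		}
	}

	int main(void)
	{
		struct reg_state grandparent = { 0, NULL };
		struct reg_state parent = { 0, &grandparent };
		struct reg_state cur = { 0, &parent };

		mark_reg_read(&cur, cur.parent);
		printf("%d %d\n", parent.live, grandparent.live);	/* 1 1 */
		return 0;
	}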
3040 if (reg->type == CONST_PTR_TO_DYNPTR) in mark_dynptr_read()
3049 ret = mark_reg_read(env, &state->stack[spi].spilled_ptr, in mark_dynptr_read()
3050 state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64); in mark_dynptr_read()
3053 return mark_reg_read(env, &state->stack[spi - 1].spilled_ptr, in mark_dynptr_read()
3054 state->stack[spi - 1].spilled_ptr.parent, REG_LIVE_READ64); in mark_dynptr_read()
3064 struct bpf_reg_state *st = &state->stack[spi - i].spilled_ptr; in mark_iter_read()
3066 err = mark_reg_read(env, st, st->parent, REG_LIVE_READ64); in mark_iter_read()
3070 mark_stack_slot_scratched(env, spi - i); in mark_iter_read()
3076 /* This function is supposed to be used by the following 32-bit optimization
3078 * on 64-bit, otherwise return FALSE.
3085 code = insn->code; in is_reg64()
3100 if (insn->src_reg == BPF_PSEUDO_CALL) in is_reg64()
3112 if (class == BPF_ALU64 && op == BPF_END && (insn->imm == 16 || insn->imm == 32)) in is_reg64()
3116 (class == BPF_ALU && op == BPF_END && insn->imm == 64)) in is_reg64()
3134 if (t == SRC_OP && reg->type != SCALAR_VALUE) in is_reg64()
3146 /* Both LD_IND and LD_ABS return 32-bit data. */ in is_reg64()
3166 /* Return the regno defined by the insn, or -1. */
3169 switch (BPF_CLASS(insn->code)) { in insn_def_regno()
3173 return -1; in insn_def_regno()
3175 if (BPF_MODE(insn->code) == BPF_ATOMIC && in insn_def_regno()
3176 (insn->imm & BPF_FETCH)) { in insn_def_regno()
3177 if (insn->imm == BPF_CMPXCHG) in insn_def_regno()
3180 return insn->src_reg; in insn_def_regno()
3182 return -1; in insn_def_regno()
3185 return insn->dst_reg; in insn_def_regno()
3189 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
3194 if (dst_reg == -1) in insn_has_def32()
3203 s32 def_idx = reg->subreg_def; in mark_insn_zext()
3208 env->insn_aux_data[def_idx - 1].zext_dst = true; in mark_insn_zext()
3209 /* The dst will be zero extended, so won't be sub-register anymore. */ in mark_insn_zext()
3210 reg->subreg_def = DEF_NOT_SUBREG; in mark_insn_zext()
3216 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; in __check_reg_arg()
3222 return -EINVAL; in __check_reg_arg()
3231 if (reg->type == NOT_INIT) { in __check_reg_arg()
3233 return -EACCES; in __check_reg_arg()
3235 /* We don't need to worry about FP liveness because it's read-only */ in __check_reg_arg()
3242 return mark_reg_read(env, reg, reg->parent, in __check_reg_arg()
3248 return -EACCES; in __check_reg_arg()
3250 reg->live |= REG_LIVE_WRITTEN; in __check_reg_arg()
3251 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1; in __check_reg_arg()
3261 struct bpf_verifier_state *vstate = env->cur_state; in check_reg_arg()
3262 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_reg_arg()
3264 return __check_reg_arg(env, state->regs, regno, t); in check_reg_arg()
3284 env->insn_aux_data[idx].jmp_point = true; in mark_jmp_point()
3289 return env->insn_aux_data[insn_idx].jmp_point; in is_jmp_point()
3296 u32 cnt = cur->jmp_history_cnt; in push_jmp_history()
3301 if (env->cur_hist_ent) { in push_jmp_history()
3305 WARN_ONCE((env->cur_hist_ent->flags & insn_flags) && in push_jmp_history()
3306 (env->cur_hist_ent->flags & insn_flags) != insn_flags, in push_jmp_history()
3308 env->insn_idx, env->cur_hist_ent->flags, insn_flags); in push_jmp_history()
3309 env->cur_hist_ent->flags |= insn_flags; in push_jmp_history()
3315 p = krealloc(cur->jmp_history, alloc_size, GFP_USER); in push_jmp_history()
3317 return -ENOMEM; in push_jmp_history()
3318 cur->jmp_history = p; in push_jmp_history()
3320 p = &cur->jmp_history[cnt - 1]; in push_jmp_history()
3321 p->idx = env->insn_idx; in push_jmp_history()
3322 p->prev_idx = env->prev_insn_idx; in push_jmp_history()
3323 p->flags = insn_flags; in push_jmp_history()
3324 cur->jmp_history_cnt = cnt; in push_jmp_history()
3325 env->cur_hist_ent = p; in push_jmp_history()
3333 if (hist_end > 0 && st->jmp_history[hist_end - 1].idx == insn_idx) in get_jmp_hist_entry()
3334 return &st->jmp_history[hist_end - 1]; in get_jmp_hist_entry()
3340 * Return -ENOENT if we exhausted all instructions within given state.
3343 * insn index within the same state, e.g.: 3->4->5->3, so just because current
3356 if (i == st->first_insn_idx) { in get_prev_insn_idx()
3358 return -ENOENT; in get_prev_insn_idx()
3359 if (cnt == 1 && st->jmp_history[0].idx == i) in get_prev_insn_idx()
3360 return -ENOENT; in get_prev_insn_idx()
3363 if (cnt && st->jmp_history[cnt - 1].idx == i) { in get_prev_insn_idx()
3364 i = st->jmp_history[cnt - 1].prev_idx; in get_prev_insn_idx()
3365 (*history)--; in get_prev_insn_idx()
3367 i--; in get_prev_insn_idx()
3377 if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL) in disasm_kfunc_name()
3380 desc_btf = find_kfunc_desc_btf(data, insn->off); in disasm_kfunc_name()
3384 func = btf_type_by_id(desc_btf, insn->imm); in disasm_kfunc_name()
3385 return btf_name_by_offset(desc_btf, func->name_off); in disasm_kfunc_name()
3390 bt->frame = frame; in bt_init()
3395 struct bpf_verifier_env *env = bt->env; in bt_reset()
3398 bt->env = env; in bt_reset()
3406 for (i = 0; i <= bt->frame; i++) in bt_empty()
3407 mask |= bt->reg_masks[i] | bt->stack_masks[i]; in bt_empty()
3414 if (bt->frame == MAX_CALL_FRAMES - 1) { in bt_subprog_enter()
3415 verbose(bt->env, "BUG subprog enter from frame %d\n", bt->frame); in bt_subprog_enter()
3417 return -EFAULT; in bt_subprog_enter()
3419 bt->frame++; in bt_subprog_enter()
3425 if (bt->frame == 0) { in bt_subprog_exit()
3426 verbose(bt->env, "BUG subprog exit from frame 0\n"); in bt_subprog_exit()
3428 return -EFAULT; in bt_subprog_exit()
3430 bt->frame--; in bt_subprog_exit()
3436 bt->reg_masks[frame] |= 1 << reg; in bt_set_frame_reg()
3441 bt->reg_masks[frame] &= ~(1 << reg); in bt_clear_frame_reg()
3446 bt_set_frame_reg(bt, bt->frame, reg); in bt_set_reg()
3451 bt_clear_frame_reg(bt, bt->frame, reg); in bt_clear_reg()
3456 bt->stack_masks[frame] |= 1ull << slot; in bt_set_frame_slot()
3461 bt->stack_masks[frame] &= ~(1ull << slot); in bt_clear_frame_slot()
3466 return bt->reg_masks[frame]; in bt_frame_reg_mask()
3471 return bt->reg_masks[bt->frame]; in bt_reg_mask()
3476 return bt->stack_masks[frame]; in bt_frame_stack_mask()
3481 return bt->stack_masks[bt->frame]; in bt_stack_mask()
3486 return bt->reg_masks[bt->frame] & (1 << reg); in bt_is_reg_set()
3491 return bt->stack_masks[frame] & (1ull << slot); in bt_is_frame_slot_set()
3508 buf_sz -= n; in fmt_reg_mask()
3513 /* format stack slots bitmask, e.g., "-8,-24,-40" for 0x15 mask */
3524 n = snprintf(buf, buf_sz, "%s%d", first ? "" : ",", -(i + 1) * 8); in fmt_stack_mask()
3527 buf_sz -= n; in fmt_stack_mask()
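Bit i of the mask stands for the stack slot at offset -(i + 1) * 8, which is how 0x15 (bits 0, 2 and 4) becomes "-8,-24,-40". A standalone version of the formatter:

	#include <stdbool.h>
	#include <stdio.h>

	static void fmt_stack_mask(char *buf, int buf_sz, unsigned long long mask)
	{
		bool first = true;
		int i, n;

		buf[0] = '\0';
		for (i = 0; i < 64; i++) {
			if (!(mask & (1ULL << i)))
				continue;
			n = snprintf(buf, buf_sz, "%s%d", first ? "" : ",", -(i + 1) * 8);
			first = false;
			buf += n;
			buf_sz -= n;
		}
	}

	int main(void)
	{
		char buf[64];

		fmt_stack_mask(buf, sizeof(buf), 0x15);
		printf("%s\n", buf);	/* -8,-24,-40 */
		return 0;
	}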
3541 * - *would be* executed next, if jump history is viewed in forward order;
3542 * - *was* processed previously during backtracking.
3552 struct bpf_insn *insn = env->prog->insnsi + idx; in backtrack_insn()
3553 u8 class = BPF_CLASS(insn->code); in backtrack_insn()
3554 u8 opcode = BPF_OP(insn->code); in backtrack_insn()
3555 u8 mode = BPF_MODE(insn->code); in backtrack_insn()
3556 u32 dreg = insn->dst_reg; in backtrack_insn()
3557 u32 sreg = insn->src_reg; in backtrack_insn()
3560 if (insn->code == 0) in backtrack_insn()
3562 if (env->log.level & BPF_LOG_LEVEL2) { in backtrack_insn()
3563 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_reg_mask(bt)); in backtrack_insn()
3565 bt->frame, env->tmp_str_buf); in backtrack_insn()
3566 fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt)); in backtrack_insn()
3567 verbose(env, "stack=%s before ", env->tmp_str_buf); in backtrack_insn()
3569 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); in backtrack_insn()
3581 if (BPF_SRC(insn->code) == BPF_X) { in backtrack_insn()
3598 if (BPF_SRC(insn->code) == BPF_X) { in backtrack_insn()
3619 if (!hist || !(hist->flags & INSN_F_STACK_ACCESS)) in backtrack_insn()
3621 /* dreg = *(u64 *)[fp - off] was a fill from the stack. in backtrack_insn()
3622 * that [fp - off] slot contains scalar that needs to be in backtrack_insn()
3625 spi = insn_stack_access_spi(hist->flags); in backtrack_insn()
3626 fr = insn_stack_access_frameno(hist->flags); in backtrack_insn()
3634 return -ENOTSUPP; in backtrack_insn()
3636 if (!hist || !(hist->flags & INSN_F_STACK_ACCESS)) in backtrack_insn()
3638 spi = insn_stack_access_spi(hist->flags); in backtrack_insn()
3639 fr = insn_stack_access_frameno(hist->flags); in backtrack_insn()
3649 subprog_insn_idx = idx + insn->imm + 1; in backtrack_insn()
3652 return -EFAULT; in backtrack_insn()
3662 /* r1-r5 are invalidated after subprog call, in backtrack_insn()
3669 return -EFAULT; in backtrack_insn()
3677 * so only r1-r5 could be still requested as in backtrack_insn()
3678 * precise, r0 and r6-r10 or any stack slot in in backtrack_insn()
3684 return -EFAULT; in backtrack_insn()
3692 return -EFAULT; in backtrack_insn()
3694 /* propagate r1-r5 to the caller */ in backtrack_insn()
3698 bt_set_frame_reg(bt, bt->frame - 1, i); in backtrack_insn()
3702 return -EFAULT; in backtrack_insn()
3705 } else if (is_sync_callback_calling_insn(insn) && idx != subseq_idx - 1) { in backtrack_insn()
3706 /* exit from callback subprog to callback-calling helper or in backtrack_insn()
3710 * propagate precision of r1-r5 (if any requested), as they are in backtrack_insn()
3716 return -EFAULT; in backtrack_insn()
3721 return -EFAULT; in backtrack_insn()
3723 /* clear r1-r5 in callback subprog's mask */ in backtrack_insn()
3727 return -EFAULT; in backtrack_insn()
3734 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0) in backtrack_insn()
3735 return -ENOTSUPP; in backtrack_insn()
3739 /* if backtracing was looking for registers R1-R5 in backtrack_insn()
3744 return -EFAULT; in backtrack_insn()
3752 * precision to registers R1-R5 should have been found already. in backtrack_insn()
3753 * In case of a callback, it is ok to have R1-R5 marked for in backtrack_insn()
3763 return -EFAULT; in backtrack_insn()
3768 * whether the instruction at subseq_idx-1 is subprog in backtrack_insn()
3774 r0_precise = subseq_idx - 1 >= 0 && in backtrack_insn()
3775 bpf_pseudo_call(&env->prog->insnsi[subseq_idx - 1]) && in backtrack_insn()
3780 return -EFAULT; in backtrack_insn()
3784 /* r6-r9 and stack slots will stay set in caller frame in backtrack_insn()
3788 } else if (BPF_SRC(insn->code) == BPF_X) { in backtrack_insn()
3801 * this insn, so for the K-based conditional in backtrack_insn()
3815 return -ENOTSUPP; in backtrack_insn()
3848 * r9 -= r8
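
A hedged sketch of the propagation rule applied to a register-source ALU instruction such as "r9 -= r8" during backtracking: if the destination register is currently marked as needing precision, the source register must be marked too, because the destination's exact value was derived from it. The function name and mask layout below are illustrative, not the verifier's own code.

/* Illustrative only: propagate a precision request across a BPF_X ALU op
 * like "r9 -= r8". If dreg (r9) is in the mask, sreg (r8) joins it,
 * since r9's exact value cannot be reconstructed without r8's.
 */
static void backtrack_alu_reg_sketch(unsigned int *reg_mask, int dreg, int sreg)
{
        if (*reg_mask & (1u << dreg))
                *reg_mask |= 1u << sreg;
}
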
3879 if (env->log.level & BPF_LOG_LEVEL2) { in mark_all_scalars_precise()
3880 verbose(env, "mark_precise: frame%d: falling back to forcing all scalars precise\n", in mark_all_scalars_precise()
3881 st->curframe); in mark_all_scalars_precise()
3887 * because precision markings in current non-checkpointed state are in mark_all_scalars_precise()
3890 for (st = st->parent; st; st = st->parent) { in mark_all_scalars_precise()
3891 for (i = 0; i <= st->curframe; i++) { in mark_all_scalars_precise()
3892 func = st->frame[i]; in mark_all_scalars_precise()
3894 reg = &func->regs[j]; in mark_all_scalars_precise()
3895 if (reg->type != SCALAR_VALUE || reg->precise) in mark_all_scalars_precise()
3897 reg->precise = true; in mark_all_scalars_precise()
3898 if (env->log.level & BPF_LOG_LEVEL2) { in mark_all_scalars_precise()
3903 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_precise()
3904 if (!is_spilled_reg(&func->stack[j])) in mark_all_scalars_precise()
3906 reg = &func->stack[j].spilled_ptr; in mark_all_scalars_precise()
3907 if (reg->type != SCALAR_VALUE || reg->precise) in mark_all_scalars_precise()
3909 reg->precise = true; in mark_all_scalars_precise()
3910 if (env->log.level & BPF_LOG_LEVEL2) { in mark_all_scalars_precise()
3912 i, -(j + 1) * 8); in mark_all_scalars_precise()
3925 for (i = 0; i <= st->curframe; i++) { in mark_all_scalars_imprecise()
3926 func = st->frame[i]; in mark_all_scalars_imprecise()
3928 reg = &func->regs[j]; in mark_all_scalars_imprecise()
3929 if (reg->type != SCALAR_VALUE) in mark_all_scalars_imprecise()
3931 reg->precise = false; in mark_all_scalars_imprecise()
3933 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_imprecise()
3934 if (!is_spilled_reg(&func->stack[j])) in mark_all_scalars_imprecise()
3936 reg = &func->stack[j].spilled_ptr; in mark_all_scalars_imprecise()
3937 if (reg->type != SCALAR_VALUE) in mark_all_scalars_imprecise()
3939 reg->precise = false; in mark_all_scalars_imprecise()
3948 for (i = 0; i < s->count; ++i) in idset_contains()
3949 if (s->ids[i] == id) in idset_contains()
3957 if (WARN_ON_ONCE(s->count >= ARRAY_SIZE(s->ids))) in idset_push()
3958 return -EFAULT; in idset_push()
3959 s->ids[s->count++] = id; in idset_push()
3965 s->count = 0; in idset_reset()
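
For illustration, a small fixed-capacity id set along the lines of the helpers above; the struct and function names are made up for the example and do not mirror struct bpf_idset exactly.

#include <stdbool.h>
#include <stddef.h>

struct demo_idset {
        unsigned int ids[64];
        size_t count;
};

static bool demo_idset_contains(const struct demo_idset *s, unsigned int id)
{
        for (size_t i = 0; i < s->count; i++)
                if (s->ids[i] == id)
                        return true;
        return false;
}

static int demo_idset_push(struct demo_idset *s, unsigned int id)
{
        if (s->count >= 64)
                return -1;      /* the kernel helper warns and returns -EFAULT */
        s->ids[s->count++] = id;
        return 0;
}

static void demo_idset_reset(struct demo_idset *s)
{
        s->count = 0;
}
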
3968 /* Collect a set of IDs for all registers currently marked as precise in env->bt.
3973 struct bpf_idset *precise_ids = &env->idset_scratch; in mark_precise_scalar_ids()
3974 struct backtrack_state *bt = &env->bt; in mark_precise_scalar_ids()
3982 for (fr = bt->frame; fr >= 0; fr--) { in mark_precise_scalar_ids()
3983 func = st->frame[fr]; in mark_precise_scalar_ids()
3987 reg = &func->regs[i]; in mark_precise_scalar_ids()
3988 if (!reg->id || reg->type != SCALAR_VALUE) in mark_precise_scalar_ids()
3990 if (idset_push(precise_ids, reg->id)) in mark_precise_scalar_ids()
3991 return -EFAULT; in mark_precise_scalar_ids()
3996 if (i >= func->allocated_stack / BPF_REG_SIZE) in mark_precise_scalar_ids()
3998 if (!is_spilled_scalar_reg(&func->stack[i])) in mark_precise_scalar_ids()
4000 reg = &func->stack[i].spilled_ptr; in mark_precise_scalar_ids()
4001 if (!reg->id) in mark_precise_scalar_ids()
4003 if (idset_push(precise_ids, reg->id)) in mark_precise_scalar_ids()
4004 return -EFAULT; in mark_precise_scalar_ids()
4008 for (fr = 0; fr <= st->curframe; ++fr) { in mark_precise_scalar_ids()
4009 func = st->frame[fr]; in mark_precise_scalar_ids()
4012 reg = &func->regs[i]; in mark_precise_scalar_ids()
4013 if (!reg->id) in mark_precise_scalar_ids()
4015 if (!idset_contains(precise_ids, reg->id)) in mark_precise_scalar_ids()
4019 for (i = 0; i < func->allocated_stack / BPF_REG_SIZE; ++i) { in mark_precise_scalar_ids()
4020 if (!is_spilled_scalar_reg(&func->stack[i])) in mark_precise_scalar_ids()
4022 reg = &func->stack[i].spilled_ptr; in mark_precise_scalar_ids()
4023 if (!reg->id) in mark_precise_scalar_ids()
4025 if (!idset_contains(precise_ids, reg->id)) in mark_precise_scalar_ids()
4051 * i.e., it is not yet put into env->explored_states, and it has no children
4054 * reached or b) checkpointed and put into env->explored_states, branching out
4123 struct backtrack_state *bt = &env->bt; in __mark_chain_precision()
4124 struct bpf_verifier_state *st = env->cur_state; in __mark_chain_precision()
4125 int first_idx = st->first_insn_idx; in __mark_chain_precision()
4126 int last_idx = env->insn_idx; in __mark_chain_precision()
4127 int subseq_idx = -1; in __mark_chain_precision()
4133 if (!env->bpf_capable) in __mark_chain_precision()
4137 bt_init(bt, env->cur_state->curframe); in __mark_chain_precision()
4143 func = st->frame[bt->frame]; in __mark_chain_precision()
4145 reg = &func->regs[regno]; in __mark_chain_precision()
4146 if (reg->type != SCALAR_VALUE) { in __mark_chain_precision()
4148 return -EFAULT; in __mark_chain_precision()
4158 u32 history = st->jmp_history_cnt; in __mark_chain_precision()
4161 if (env->log.level & BPF_LOG_LEVEL2) { in __mark_chain_precision()
4163 bt->frame, last_idx, first_idx, subseq_idx); in __mark_chain_precision()
4176 * --- state #0 --- in __mark_chain_precision()
4180 * --- state #1 {r1.id = A, r2.id = A} --- in __mark_chain_precision()
4184 * --- state #2 {r1.id = A, r2.id = A} --- in __mark_chain_precision()
4189 return -EFAULT; in __mark_chain_precision()
4194 * requested precise registers are R1-R5 in __mark_chain_precision()
4197 if (st->curframe == 0 && in __mark_chain_precision()
4198 st->frame[0]->subprogno > 0 && in __mark_chain_precision()
4199 st->frame[0]->callsite == BPF_MAIN_FUNC && in __mark_chain_precision()
4204 reg = &st->frame[0]->regs[i]; in __mark_chain_precision()
4206 if (reg->type == SCALAR_VALUE) in __mark_chain_precision()
4207 reg->precise = true; in __mark_chain_precision()
4213 st->frame[0]->subprogno, bt_reg_mask(bt), bt_stack_mask(bt)); in __mark_chain_precision()
4215 return -EFAULT; in __mark_chain_precision()
4226 if (err == -ENOTSUPP) { in __mark_chain_precision()
4227 mark_all_scalars_precise(env, env->cur_state); in __mark_chain_precision()
4241 if (i == -ENOENT) in __mark_chain_precision()
4243 if (i >= env->prog->len) { in __mark_chain_precision()
4252 return -EFAULT; in __mark_chain_precision()
4255 st = st->parent; in __mark_chain_precision()
4259 for (fr = bt->frame; fr >= 0; fr--) { in __mark_chain_precision()
4260 func = st->frame[fr]; in __mark_chain_precision()
4263 reg = &func->regs[i]; in __mark_chain_precision()
4264 if (reg->type != SCALAR_VALUE) { in __mark_chain_precision()
4268 if (reg->precise) in __mark_chain_precision()
4271 reg->precise = true; in __mark_chain_precision()
4276 if (i >= func->allocated_stack / BPF_REG_SIZE) { in __mark_chain_precision()
4278 i, func->allocated_stack / BPF_REG_SIZE); in __mark_chain_precision()
4280 return -EFAULT; in __mark_chain_precision()
4283 if (!is_spilled_scalar_reg(&func->stack[i])) { in __mark_chain_precision()
4287 reg = &func->stack[i].spilled_ptr; in __mark_chain_precision()
4288 if (reg->precise) in __mark_chain_precision()
4291 reg->precise = true; in __mark_chain_precision()
4293 if (env->log.level & BPF_LOG_LEVEL2) { in __mark_chain_precision()
4294 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, in __mark_chain_precision()
4297 fr, env->tmp_str_buf); in __mark_chain_precision()
4298 fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, in __mark_chain_precision()
4300 verbose(env, "stack=%s: ", env->tmp_str_buf); in __mark_chain_precision()
4309 last_idx = st->last_insn_idx; in __mark_chain_precision()
4310 first_idx = st->first_insn_idx; in __mark_chain_precision()
4314 * something (e.g., stack access through non-r10 register), so in __mark_chain_precision()
4318 mark_all_scalars_precise(env, env->cur_state); in __mark_chain_precision()
4330 /* mark_chain_precision_batch() assumes that env->bt is set in the caller to
4335 return __mark_chain_precision(env, -1); in mark_chain_precision_batch()
4367 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); in register_is_null()
4373 return reg->type == SCALAR_VALUE && in is_reg_const()
4374 tnum_is_const(subreg32 ? tnum_subreg(reg->var_off) : reg->var_off); in is_reg_const()
4380 return subreg32 ? tnum_subreg(reg->var_off).value : reg->var_off.value; in reg_const_value()
4385 return tnum_is_unknown(reg->var_off) && in __is_scalar_unbounded()
4386 reg->smin_value == S64_MIN && reg->smax_value == S64_MAX && in __is_scalar_unbounded()
4387 reg->umin_value == 0 && reg->umax_value == U64_MAX && in __is_scalar_unbounded()
4388 reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX && in __is_scalar_unbounded()
4389 reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX; in __is_scalar_unbounded()
4394 return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg); in register_is_bounded()
4403 return reg->type != SCALAR_VALUE; in __is_pointer_value()
4406 /* Copy src state preserving dst->parent and dst->live fields */
4409 struct bpf_reg_state *parent = dst->parent; in copy_register_state()
4410 enum bpf_reg_liveness live = dst->live; in copy_register_state()
4413 dst->parent = parent; in copy_register_state()
4414 dst->live = live; in copy_register_state()
4424 copy_register_state(&state->stack[spi].spilled_ptr, reg); in save_register_state()
4426 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in save_register_state()
4428 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) in save_register_state()
4429 state->stack[spi].slot_type[i - 1] = STACK_SPILL; in save_register_state()
4432 for (; i; i--) in save_register_state()
4433 mark_stack_slot_misc(env, &state->stack[spi].slot_type[i - 1]); in save_register_state()
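
A sketch of the per-byte tagging done by the two loops above when a register narrower than 8 bytes is spilled: the bytes covered by the spill become STACK_SPILL, the remainder of the slot becomes STACK_MISC. The constants and function below are illustrative stand-ins.

enum { DEMO_SPILL = 1, DEMO_MISC = 2 };         /* stand-ins for STACK_* */

/* After demo_mark_spill(slot, 4): bytes 4..7 tagged DEMO_SPILL,
 * bytes 0..3 tagged DEMO_MISC.
 */
static void demo_mark_spill(unsigned char slot_type[8], int size)
{
        int i;

        for (i = 8; i > 8 - size; i--)
                slot_type[i - 1] = DEMO_SPILL;
        for (; i; i--)
                slot_type[i - 1] = DEMO_MISC;
}
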
4438 return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM; in is_bpf_st_mem()
4451 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; in check_stack_write_fixed_off()
4452 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_stack_write_fixed_off()
4454 int insn_flags = insn_stack_access_flags(state->frameno, spi); in check_stack_write_fixed_off()
4456 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, in check_stack_write_fixed_off()
4459 if (!env->allow_ptr_leaks && in check_stack_write_fixed_off()
4460 is_spilled_reg(&state->stack[spi]) && in check_stack_write_fixed_off()
4463 return -EACCES; in check_stack_write_fixed_off()
4466 cur = env->cur_state->frame[env->cur_state->curframe]; in check_stack_write_fixed_off()
4468 reg = &cur->regs[value_regno]; in check_stack_write_fixed_off()
4469 if (!env->bypass_spec_v4) { in check_stack_write_fixed_off()
4470 bool sanitize = reg && is_spillable_regtype(reg->type); in check_stack_write_fixed_off()
4473 u8 type = state->stack[spi].slot_type[i]; in check_stack_write_fixed_off()
4482 env->insn_aux_data[insn_idx].sanitize_stack_spill = true; in check_stack_write_fixed_off()
4490 if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && env->bpf_capable) { in check_stack_write_fixed_off()
4493 if (fls64(reg->umax_value) > BITS_PER_BYTE * size) in check_stack_write_fixed_off()
4494 state->stack[spi].spilled_ptr.id = 0; in check_stack_write_fixed_off()
4496 insn->imm != 0 && env->bpf_capable) { in check_stack_write_fixed_off()
4499 __mark_reg_known(&fake_reg, insn->imm); in check_stack_write_fixed_off()
4502 } else if (reg && is_spillable_regtype(reg->type)) { in check_stack_write_fixed_off()
4507 return -EACCES; in check_stack_write_fixed_off()
4509 if (state != cur && reg->type == PTR_TO_STACK) { in check_stack_write_fixed_off()
4511 return -EINVAL; in check_stack_write_fixed_off()
4518 state->stack[spi].spilled_ptr.type = NOT_INIT; in check_stack_write_fixed_off()
4520 if (is_stack_slot_special(&state->stack[spi])) in check_stack_write_fixed_off()
4522 scrub_spilled_slot(&state->stack[spi].slot_type[i]); in check_stack_write_fixed_off()
4533 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in check_stack_write_fixed_off()
4537 (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) { in check_stack_write_fixed_off()
4552 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type; in check_stack_write_fixed_off()
4557 return push_jmp_history(env, env->cur_state, insn_flags); in check_stack_write_fixed_off()
4567 * 'off' includes 'regno->off'.
4568 * 'value_regno' can be -1, meaning that an unknown value is being written to
4590 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_stack_write_var_off()
4597 cur = env->cur_state->frame[env->cur_state->curframe]; in check_stack_write_var_off()
4598 ptr_reg = &cur->regs[ptr_regno]; in check_stack_write_var_off()
4599 min_off = ptr_reg->smin_value + off; in check_stack_write_var_off()
4600 max_off = ptr_reg->smax_value + off + size; in check_stack_write_var_off()
4602 value_reg = &cur->regs[value_regno]; in check_stack_write_var_off()
4604 (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0)) in check_stack_write_var_off()
4621 slot = -i - 1; in check_stack_write_var_off()
4623 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_write_var_off()
4626 if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) { in check_stack_write_var_off()
4638 verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d", in check_stack_write_var_off()
4640 return -EINVAL; in check_stack_write_var_off()
4644 state->stack[spi].spilled_ptr.type = NOT_INIT; in check_stack_write_var_off()
4660 if (*stype == STACK_INVALID && !env->allow_uninit_stack) { in check_stack_write_var_off()
4661 verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d", in check_stack_write_var_off()
4663 return -EINVAL; in check_stack_write_var_off()
4689 struct bpf_verifier_state *vstate = env->cur_state; in mark_reg_stack_read()
4690 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in mark_reg_stack_read()
4696 slot = -i - 1; in mark_reg_stack_read()
4699 stype = ptr_state->stack[spi].slot_type; in mark_reg_stack_read()
4704 if (zeros == max_off - min_off) { in mark_reg_stack_read()
4708 __mark_reg_const_zero(env, &state->regs[dst_regno]); in mark_reg_stack_read()
4711 mark_reg_unknown(env, state->regs, dst_regno); in mark_reg_stack_read()
4713 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in mark_reg_stack_read()
4720 * 'dst_regno' can be -1, meaning that the read value is not going to a
4730 struct bpf_verifier_state *vstate = env->cur_state; in check_stack_read_fixed_off()
4731 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_stack_read_fixed_off()
4732 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; in check_stack_read_fixed_off()
4735 int insn_flags = insn_stack_access_flags(reg_state->frameno, spi); in check_stack_read_fixed_off()
4737 stype = reg_state->stack[spi].slot_type; in check_stack_read_fixed_off()
4738 reg = &reg_state->stack[spi].spilled_ptr; in check_stack_read_fixed_off()
4742 if (is_spilled_reg(&reg_state->stack[spi])) { in check_stack_read_fixed_off()
4745 for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--) in check_stack_read_fixed_off()
4749 if (reg->type != SCALAR_VALUE) { in check_stack_read_fixed_off()
4750 verbose_linfo(env, env->insn_idx, "; "); in check_stack_read_fixed_off()
4752 return -EACCES; in check_stack_read_fixed_off()
4755 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read_fixed_off()
4763 s32 subreg_def = state->regs[dst_regno].subreg_def; in check_stack_read_fixed_off()
4765 copy_register_state(&state->regs[dst_regno], reg); in check_stack_read_fixed_off()
4766 state->regs[dst_regno].subreg_def = subreg_def; in check_stack_read_fixed_off()
4771 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
4782 if (type == STACK_INVALID && env->allow_uninit_stack) in check_stack_read_fixed_off()
4786 return -EACCES; in check_stack_read_fixed_off()
4790 tnum_is_const(reg->var_off) && reg->var_off.value == 0) { in check_stack_read_fixed_off()
4791 __mark_reg_const_zero(env, &state->regs[dst_regno]); in check_stack_read_fixed_off()
4795 __mark_reg_const_zero(env, &state->regs[dst_regno]); in check_stack_read_fixed_off()
4798 mark_reg_unknown(env, state->regs, dst_regno); in check_stack_read_fixed_off()
4802 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in check_stack_read_fixed_off()
4805 copy_register_state(&state->regs[dst_regno], reg); in check_stack_read_fixed_off()
4810 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in check_stack_read_fixed_off()
4811 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) { in check_stack_read_fixed_off()
4812 /* If dst_regno==-1, the caller is asking us whether in check_stack_read_fixed_off()
4820 return -EACCES; in check_stack_read_fixed_off()
4822 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read_fixed_off()
4825 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
4830 if (type == STACK_INVALID && env->allow_uninit_stack) in check_stack_read_fixed_off()
4834 return -EACCES; in check_stack_read_fixed_off()
4836 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read_fixed_off()
4842 return push_jmp_history(env, env->cur_state, insn_flags); in check_stack_read_fixed_off()
4871 * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
4891 min_off = reg->smin_value + off; in check_stack_read_var_off()
4892 max_off = reg->smax_value + off; in check_stack_read_var_off()
4904 * can be -1, meaning that the read value is not going to a register.
4914 bool var_off = !tnum_is_const(reg->var_off); in check_stack_read()
4923 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_read()
4926 return -EACCES; in check_stack_read()
4938 off += reg->var_off.value; in check_stack_read()
4957 * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
4959 * be -1, meaning that we're not writing from a register.
4971 if (tnum_is_const(reg->var_off)) { in check_stack_write()
4972 off += reg->var_off.value; in check_stack_write()
4995 map->value_size, off, size); in check_map_access_type()
4996 return -EACCES; in check_map_access_type()
5001 map->value_size, off, size); in check_map_access_type()
5002 return -EACCES; in check_map_access_type()
5020 switch (reg->type) { in __check_mem_access()
5033 off, size, regno, reg->id, off, mem_size); in __check_mem_access()
5041 return -EACCES; in __check_mem_access()
5049 struct bpf_verifier_state *vstate = env->cur_state; in check_mem_region_access()
5050 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_mem_region_access()
5051 struct bpf_reg_state *reg = &state->regs[regno]; in check_mem_region_access()
5064 if (reg->smin_value < 0 && in check_mem_region_access()
5065 (reg->smin_value == S64_MIN || in check_mem_region_access()
5066 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || in check_mem_region_access()
5067 reg->smin_value + off < 0)) { in check_mem_region_access()
5070 return -EACCES; in check_mem_region_access()
5072 err = __check_mem_access(env, regno, reg->smin_value + off, size, in check_mem_region_access()
5082 * If reg->umax_value + off could overflow, treat that as unbounded too. in check_mem_region_access()
5084 if (reg->umax_value >= BPF_MAX_VAR_OFF) { in check_mem_region_access()
5087 return -EACCES; in check_mem_region_access()
5089 err = __check_mem_access(env, regno, reg->umax_value + off, size, in check_mem_region_access()
5104 /* Access to this pointer-typed register or passing it to a helper in __check_ptr_off_reg()
5108 if (reg->off < 0) { in __check_ptr_off_reg()
5110 reg_type_str(env, reg->type), regno, reg->off); in __check_ptr_off_reg()
5111 return -EACCES; in __check_ptr_off_reg()
5114 if (!fixed_off_ok && reg->off) { in __check_ptr_off_reg()
5116 reg_type_str(env, reg->type), regno, reg->off); in __check_ptr_off_reg()
5117 return -EACCES; in __check_ptr_off_reg()
5120 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { in __check_ptr_off_reg()
5123 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in __check_ptr_off_reg()
5125 reg_type_str(env, reg->type), tn_buf); in __check_ptr_off_reg()
5126 return -EACCES; in __check_ptr_off_reg()
5142 const char *targ_name = btf_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id); in map_kptr_match_type()
5146 if (btf_is_kernel(reg->btf)) { in map_kptr_match_type()
5150 if (kptr_field->type == BPF_KPTR_UNREF) in map_kptr_match_type()
5154 if (kptr_field->type == BPF_KPTR_PERCPU) in map_kptr_match_type()
5158 if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags)) in map_kptr_match_type()
5161 /* We need to verify reg->type and reg->btf, before accessing reg->btf */ in map_kptr_match_type()
5162 reg_name = btf_type_name(reg->btf, reg->btf_id); in map_kptr_match_type()
5168 * reg->off and reg->ref_obj_id are not needed here. in map_kptr_match_type()
5171 return -EACCES; in map_kptr_match_type()
5174 * we also need to take into account the reg->off. in map_kptr_match_type()
5185 * val->foo = v; // reg->off is zero, btf and btf_id match type in map_kptr_match_type()
5186 * val->bar = &v->br; // reg->off is still zero, but we need to retry with in map_kptr_match_type()
5188 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked in map_kptr_match_type()
5191 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off in map_kptr_match_type()
5197 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, in map_kptr_match_type()
5198 kptr_field->kptr.btf, kptr_field->kptr.btf_id, in map_kptr_match_type()
5199 kptr_field->type != BPF_KPTR_UNREF)) in map_kptr_match_type()
5204 reg_type_str(env, reg->type), reg_name); in map_kptr_match_type()
5206 if (kptr_field->type == BPF_KPTR_UNREF) in map_kptr_match_type()
5211 return -EINVAL; in map_kptr_match_type()
5214 /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
5219 return env->cur_state->active_rcu_lock || in in_rcu_cs()
5220 env->cur_state->active_lock.ptr || in in_rcu_cs()
5221 !env->prog->aux->sleepable; in in_rcu_cs()
5247 if (btf_is_kernel(kptr_field->kptr.btf)) in kptr_pointee_btf_record()
5250 meta = btf_find_struct_meta(kptr_field->kptr.btf, in kptr_pointee_btf_record()
5251 kptr_field->kptr.btf_id); in kptr_pointee_btf_record()
5253 return meta ? meta->record : NULL; in kptr_pointee_btf_record()
5258 const struct btf_field_kptr *kptr = &field->kptr; in rcu_safe_kptr()
5260 return field->type == BPF_KPTR_PERCPU || in rcu_safe_kptr()
5261 (field->type == BPF_KPTR_REF && rcu_protected_object(kptr->btf, kptr->btf_id)); in rcu_safe_kptr()
5272 if (kptr_field->type == BPF_KPTR_PERCPU) in btf_ld_kptr_type()
5274 else if (!btf_is_kernel(kptr_field->kptr.btf)) in btf_ld_kptr_type()
5291 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_map_kptr_access()
5292 int class = BPF_CLASS(insn->code); in check_map_kptr_access()
5296 * - Reject cases where variable offset may touch kptr in check_map_kptr_access()
5297 * - size of access (must be BPF_DW) in check_map_kptr_access()
5298 * - tnum_is_const(reg->var_off) in check_map_kptr_access()
5299 * - kptr_field->offset == off + reg->var_off.value in check_map_kptr_access()
5302 if (BPF_MODE(insn->code) != BPF_MEM) { in check_map_kptr_access()
5304 return -EACCES; in check_map_kptr_access()
5311 (kptr_field->type == BPF_KPTR_REF || kptr_field->type == BPF_KPTR_PERCPU)) { in check_map_kptr_access()
5313 return -EACCES; in check_map_kptr_access()
5321 mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf, in check_map_kptr_access()
5322 kptr_field->kptr.btf_id, btf_ld_kptr_type(env, kptr_field)); in check_map_kptr_access()
5324 val_reg->id = ++env->id_gen; in check_map_kptr_access()
5329 return -EACCES; in check_map_kptr_access()
5331 if (insn->imm) { in check_map_kptr_access()
5333 kptr_field->offset); in check_map_kptr_access()
5334 return -EACCES; in check_map_kptr_access()
5338 return -EACCES; in check_map_kptr_access()
5348 struct bpf_verifier_state *vstate = env->cur_state; in check_map_access()
5349 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_map_access()
5350 struct bpf_reg_state *reg = &state->regs[regno]; in check_map_access()
5351 struct bpf_map *map = reg->map_ptr; in check_map_access()
5355 err = check_mem_region_access(env, regno, off, size, map->value_size, in check_map_access()
5360 if (IS_ERR_OR_NULL(map->record)) in check_map_access()
5362 rec = map->record; in check_map_access()
5363 for (i = 0; i < rec->cnt; i++) { in check_map_access()
5364 struct btf_field *field = &rec->fields[i]; in check_map_access()
5365 u32 p = field->offset; in check_map_access()
5371 if (reg->smin_value + off < p + btf_field_type_size(field->type) && in check_map_access()
5372 p < reg->umax_value + off + size) { in check_map_access()
5373 switch (field->type) { in check_map_access()
5379 return -EACCES; in check_map_access()
5381 if (!tnum_is_const(reg->var_off)) { in check_map_access()
5383 return -EACCES; in check_map_access()
5385 if (p != off + reg->var_off.value) { in check_map_access()
5387 p, off + reg->var_off.value); in check_map_access()
5388 return -EACCES; in check_map_access()
5392 return -EACCES; in check_map_access()
5397 btf_field_type_name(field->type)); in check_map_access()
5398 return -EACCES; in check_map_access()
5411 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in may_access_direct_pkt_data()
5433 return meta->pkt_access; in may_access_direct_pkt_data()
5435 env->seen_direct_write = true; in may_access_direct_pkt_data()
5440 env->seen_direct_write = true; in may_access_direct_pkt_data()
5457 * reg->range we have comes after that. We are only checking the fixed in check_packet_access()
5464 if (reg->smin_value < 0) { in check_packet_access()
5467 return -EACCES; in check_packet_access()
5470 err = reg->range < 0 ? -EINVAL : in check_packet_access()
5471 __check_mem_access(env, regno, off, size, reg->range, in check_packet_access()
5478 /* __check_mem_access has made sure "off + size - 1" is within u16. in check_packet_access()
5479 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff, in check_packet_access()
5482 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. in check_packet_access()
5484 env->prog->aux->max_pkt_offset = in check_packet_access()
5485 max_t(u32, env->prog->aux->max_pkt_offset, in check_packet_access()
5486 off + reg->umax_value + size - 1); in check_packet_access()
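
To connect this to what programs actually do: the 'range' checked above comes from a data_end comparison performed by the program itself. A minimal illustrative XDP snippet follows (assumes libbpf's bpf_helpers.h; not part of this file).

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_len_check(struct xdp_md *ctx)
{
        void *data = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;
        unsigned char *p = data;

        /* This comparison gives the packet pointer a verified range of at
         * least 14 bytes; without it the byte access below would be
         * rejected by check_packet_access().
         */
        if (p + 14 > (unsigned char *)data_end)
                return XDP_PASS;

        return p[12] == 0x08 ? XDP_PASS : XDP_DROP;     /* offset 12 < range */
}

char LICENSE[] SEC("license") = "GPL";
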
5498 .log = &env->log, in check_ctx_access()
5501 if (env->ops->is_valid_access && in check_ctx_access()
5502 env->ops->is_valid_access(off, size, t, env->prog, &info)) { in check_ctx_access()
5516 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; in check_ctx_access()
5519 if (env->prog->aux->max_ctx_offset < off + size) in check_ctx_access()
5520 env->prog->aux->max_ctx_offset = off + size; in check_ctx_access()
5525 return -EACCES; in check_ctx_access()
5535 return -EACCES; in check_flow_keys_access()
5549 if (reg->smin_value < 0) { in check_sock_access()
5552 return -EACCES; in check_sock_access()
5555 switch (reg->type) { in check_sock_access()
5574 env->insn_aux_data[insn_idx].ctx_field_size = in check_sock_access()
5580 regno, reg_type_str(env, reg->type), off, size); in check_sock_access()
5582 return -EACCES; in check_sock_access()
5587 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); in is_pointer_value()
5594 return reg->type == PTR_TO_CTX; in is_ctx_reg()
5601 return type_is_sk_pointer(reg->type); in is_sk_reg()
5608 return type_is_pkt_pointer(reg->type); in is_pkt_reg()
5616 return reg->type == PTR_TO_FLOW_KEYS; in is_flow_key_reg()
5631 if (reg->ref_obj_id) in is_trusted_reg()
5635 if (reg2btf_ids[base_type(reg->type)]) in is_trusted_reg()
5640 * other type modifiers may be safe, but we elect to take an opt-in in is_trusted_reg()
5647 return type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS && in is_trusted_reg()
5648 !bpf_type_has_unsafe_modifiers(reg->type); in is_trusted_reg()
5653 return reg->type & MEM_RCU; in is_rcu_reg()
5682 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); in check_pkt_ptr_alignment()
5686 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_pkt_ptr_alignment()
5689 ip_align, tn_buf, reg->off, off, size); in check_pkt_ptr_alignment()
5690 return -EACCES; in check_pkt_ptr_alignment()
5707 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); in check_generic_ptr_alignment()
5711 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_generic_ptr_alignment()
5713 pointer_desc, tn_buf, reg->off, off, size); in check_generic_ptr_alignment()
5714 return -EACCES; in check_generic_ptr_alignment()
5724 bool strict = env->strict_alignment || strict_alignment_once; in check_ptr_alignment()
5727 switch (reg->type) { in check_ptr_alignment()
5781 struct bpf_subprog_info *subprog = env->subprog_info; in check_max_stack_depth_subprog()
5782 struct bpf_insn *insn = env->prog->insnsi; in check_max_stack_depth_subprog()
5798 * func1 -> sub rsp, 128 in check_max_stack_depth_subprog()
5799 * subfunc1 -> sub rsp, 256 in check_max_stack_depth_subprog()
5800 * tailcall1 -> add rsp, 256 in check_max_stack_depth_subprog()
5801 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320) in check_max_stack_depth_subprog()
5802 * subfunc2 -> sub rsp, 64 in check_max_stack_depth_subprog()
5803 * subfunc22 -> sub rsp, 128 in check_max_stack_depth_subprog()
5804 * tailcall2 -> add rsp, 128 in check_max_stack_depth_subprog()
5805 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416) in check_max_stack_depth_subprog()
5814 return -EACCES; in check_max_stack_depth_subprog()
5816 /* round up to 32 bytes, since this is the granularity in check_max_stack_depth_subprog()
5823 return -EACCES; in check_max_stack_depth_subprog()
5848 return -EINVAL; in check_max_stack_depth_subprog()
5863 return -EFAULT; in check_max_stack_depth_subprog()
5868 return -EFAULT; in check_max_stack_depth_subprog()
5875 return -EINVAL; in check_max_stack_depth_subprog()
5888 return -E2BIG; in check_max_stack_depth_subprog()
5901 return -EINVAL; in check_max_stack_depth_subprog()
5906 env->prog->aux->tail_call_reachable = true; in check_max_stack_depth_subprog()
5913 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); in check_max_stack_depth_subprog()
5914 frame--; in check_max_stack_depth_subprog()
5922 struct bpf_subprog_info *si = env->subprog_info; in check_max_stack_depth()
5925 for (int i = 0; i < env->subprog_cnt; i++) { in check_max_stack_depth()
5940 int start = idx + insn->imm + 1, subprog; in get_callee_stack_depth()
5946 return -EFAULT; in get_callee_stack_depth()
5948 return env->subprog_info[subprog].stack_depth; in get_callee_stack_depth()
5961 return -EACCES; in __check_buffer_access()
5963 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { in __check_buffer_access()
5966 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in __check_buffer_access()
5970 return -EACCES; in __check_buffer_access()
5986 if (off + size > env->prog->aux->max_tp_access) in check_tp_buffer_access()
5987 env->prog->aux->max_tp_access = off + size; in check_tp_buffer_access()
5998 const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr"; in check_buffer_access()
6011 /* BPF architecture zero extends alu32 ops into 64-bit registers */
6014 reg->var_off = tnum_subreg(reg->var_off); in zext_32_to_64()
6026 reg->var_off = tnum_cast(reg->var_off, size); in coerce_reg_to_size()
6029 mask = ((u64)1 << (size * 8)) - 1; in coerce_reg_to_size()
6030 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { in coerce_reg_to_size()
6031 reg->umin_value &= mask; in coerce_reg_to_size()
6032 reg->umax_value &= mask; in coerce_reg_to_size()
6034 reg->umin_value = 0; in coerce_reg_to_size()
6035 reg->umax_value = mask; in coerce_reg_to_size()
6037 reg->smin_value = reg->umin_value; in coerce_reg_to_size()
6038 reg->smax_value = reg->umax_value; in coerce_reg_to_size()
6041 * values are also truncated so we push 64-bit bounds into in coerce_reg_to_size()
6042 * 32-bit bounds. Above were truncated < 32-bits already. in coerce_reg_to_size()
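
A standalone arithmetic sketch of the truncation rule above: masking to 'size' bytes keeps the bounds only if umin and umax agree in the bits that are masked away; otherwise the truncated value may wrap and the bounds reset to the full masked range. Values below are illustrative.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t umin = 0x10002, umax = 0x10005;        /* same upper bits */
        int size = 2;                                   /* truncate to u16 */
        uint64_t mask = (1ULL << (size * 8)) - 1;

        if ((umin & ~mask) == (umax & ~mask))
                printf("bounds stay [%llu, %llu]\n",
                       (unsigned long long)(umin & mask),
                       (unsigned long long)(umax & mask));     /* [2, 5] */
        else
                printf("bounds reset to [0, %llu]\n",
                       (unsigned long long)mask);
        return 0;
}
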
6053 reg->smin_value = reg->s32_min_value = S8_MIN; in set_sext64_default_val()
6054 reg->smax_value = reg->s32_max_value = S8_MAX; in set_sext64_default_val()
6056 reg->smin_value = reg->s32_min_value = S16_MIN; in set_sext64_default_val()
6057 reg->smax_value = reg->s32_max_value = S16_MAX; in set_sext64_default_val()
6060 reg->smin_value = reg->s32_min_value = S32_MIN; in set_sext64_default_val()
6061 reg->smax_value = reg->s32_max_value = S32_MAX; in set_sext64_default_val()
6063 reg->umin_value = reg->u32_min_value = 0; in set_sext64_default_val()
6064 reg->umax_value = U64_MAX; in set_sext64_default_val()
6065 reg->u32_max_value = U32_MAX; in set_sext64_default_val()
6066 reg->var_off = tnum_unknown; in set_sext64_default_val()
6075 if (tnum_is_const(reg->var_off)) { in coerce_reg_to_size_sx()
6076 u64_cval = reg->var_off.value; in coerce_reg_to_size_sx()
6078 reg->var_off = tnum_const((s8)u64_cval); in coerce_reg_to_size_sx()
6080 reg->var_off = tnum_const((s16)u64_cval); in coerce_reg_to_size_sx()
6083 reg->var_off = tnum_const((s32)u64_cval); in coerce_reg_to_size_sx()
6085 u64_cval = reg->var_off.value; in coerce_reg_to_size_sx()
6086 reg->smax_value = reg->smin_value = u64_cval; in coerce_reg_to_size_sx()
6087 reg->umax_value = reg->umin_value = u64_cval; in coerce_reg_to_size_sx()
6088 reg->s32_max_value = reg->s32_min_value = u64_cval; in coerce_reg_to_size_sx()
6089 reg->u32_max_value = reg->u32_min_value = u64_cval; in coerce_reg_to_size_sx()
6093 top_smax_value = ((u64)reg->smax_value >> num_bits) << num_bits; in coerce_reg_to_size_sx()
6094 top_smin_value = ((u64)reg->smin_value >> num_bits) << num_bits; in coerce_reg_to_size_sx()
6101 init_s64_max = (s8)reg->smax_value; in coerce_reg_to_size_sx()
6102 init_s64_min = (s8)reg->smin_value; in coerce_reg_to_size_sx()
6104 init_s64_max = (s16)reg->smax_value; in coerce_reg_to_size_sx()
6105 init_s64_min = (s16)reg->smin_value; in coerce_reg_to_size_sx()
6107 init_s64_max = (s32)reg->smax_value; in coerce_reg_to_size_sx()
6108 init_s64_min = (s32)reg->smin_value; in coerce_reg_to_size_sx()
6116 reg->smin_value = reg->s32_min_value = s64_min; in coerce_reg_to_size_sx()
6117 reg->smax_value = reg->s32_max_value = s64_max; in coerce_reg_to_size_sx()
6118 reg->umin_value = reg->u32_min_value = s64_min; in coerce_reg_to_size_sx()
6119 reg->umax_value = reg->u32_max_value = s64_max; in coerce_reg_to_size_sx()
6120 reg->var_off = tnum_range(s64_min, s64_max); in coerce_reg_to_size_sx()
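
For the constant case above, a small sketch of what a sign-extending 1-byte coercion does to a known value: 0xF6 becomes the 64-bit constant -10 (0xffff...fff6), so the signed and unsigned bounds all collapse to that single value. Purely illustrative.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t u64_cval = 0xF6;
        int64_t sext = (int8_t)u64_cval;        /* sign-extend the low byte */

        printf("%lld 0x%llx\n", (long long)sext,
               (unsigned long long)(uint64_t)sext);
        /* prints: -10 0xfffffffffffffff6 */
        return 0;
}
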
6131 reg->s32_min_value = S8_MIN; in set_sext32_default_val()
6132 reg->s32_max_value = S8_MAX; in set_sext32_default_val()
6135 reg->s32_min_value = S16_MIN; in set_sext32_default_val()
6136 reg->s32_max_value = S16_MAX; in set_sext32_default_val()
6138 reg->u32_min_value = 0; in set_sext32_default_val()
6139 reg->u32_max_value = U32_MAX; in set_sext32_default_val()
6148 if (tnum_is_const(reg->var_off)) { in coerce_subreg_to_size_sx()
6149 u32_val = reg->var_off.value; in coerce_subreg_to_size_sx()
6151 reg->var_off = tnum_const((s8)u32_val); in coerce_subreg_to_size_sx()
6153 reg->var_off = tnum_const((s16)u32_val); in coerce_subreg_to_size_sx()
6155 u32_val = reg->var_off.value; in coerce_subreg_to_size_sx()
6156 reg->s32_min_value = reg->s32_max_value = u32_val; in coerce_subreg_to_size_sx()
6157 reg->u32_min_value = reg->u32_max_value = u32_val; in coerce_subreg_to_size_sx()
6161 top_smax_value = ((u32)reg->s32_max_value >> num_bits) << num_bits; in coerce_subreg_to_size_sx()
6162 top_smin_value = ((u32)reg->s32_min_value >> num_bits) << num_bits; in coerce_subreg_to_size_sx()
6169 init_s32_max = (s8)reg->s32_max_value; in coerce_subreg_to_size_sx()
6170 init_s32_min = (s8)reg->s32_min_value; in coerce_subreg_to_size_sx()
6173 init_s32_max = (s16)reg->s32_max_value; in coerce_subreg_to_size_sx()
6174 init_s32_min = (s16)reg->s32_min_value; in coerce_subreg_to_size_sx()
6180 reg->s32_min_value = s32_min; in coerce_subreg_to_size_sx()
6181 reg->s32_max_value = s32_max; in coerce_subreg_to_size_sx()
6182 reg->u32_min_value = (u32)s32_min; in coerce_subreg_to_size_sx()
6183 reg->u32_max_value = (u32)s32_max; in coerce_subreg_to_size_sx()
6193 /* A map is considered read-only if the following conditions are true: in bpf_map_is_rdonly()
6206 return (map->map_flags & BPF_F_RDONLY_PROG) && in bpf_map_is_rdonly()
6207 READ_ONCE(map->frozen) && in bpf_map_is_rdonly()
6218 err = map->ops->map_direct_value_addr(map, &addr, off); in bpf_map_direct_read()
6237 return -EINVAL; in bpf_map_direct_read()
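
From user space, the conditions above are typically met by creating the map with BPF_F_RDONLY_PROG and then freezing it. A hedged libbpf sketch (assumes libbpf >= 0.7 for bpf_map_create(); error handling trimmed):

#include <bpf/bpf.h>
#include <linux/bpf.h>

static int make_rdonly_array(void)
{
        LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_RDONLY_PROG);
        int fd;

        fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "demo_ro",
                            sizeof(__u32), sizeof(__u64), 1, &opts);
        if (fd < 0)
                return fd;
        /* ... fill entries with bpf_map_update_elem() while still writable ... */
        if (bpf_map_freeze(fd))         /* no further writes from user space */
                return -1;
        return fd;
}
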
6261 /* cgrp->kn is always accessible as documented in kernel/cgroup/cgroup.c */ in BTF_TYPE_SAFE_RCU()
6274 /* skb->sk, req->sk are not RCU protected, but we mark them as such
6304 /* no negative dentries in places where bpf can see them */ in BTF_TYPE_SAFE_TRUSTED()
6320 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu"); in type_is_rcu()
6331 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu_or_null"); in type_is_rcu_or_null()
6345 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_trusted"); in type_is_trusted()
6355 const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id); in check_ptr_to_btf_access()
6356 const char *tname = btf_name_by_offset(reg->btf, t->name_off); in check_ptr_to_btf_access()
6362 if (!env->allow_ptr_leaks) { in check_ptr_to_btf_access()
6366 return -EPERM; in check_ptr_to_btf_access()
6368 if (!env->prog->gpl_compatible && btf_is_kernel(reg->btf)) { in check_ptr_to_btf_access()
6370 "Cannot access kernel 'struct %s' from non-GPL compatible program\n", in check_ptr_to_btf_access()
6372 return -EINVAL; in check_ptr_to_btf_access()
6378 return -EACCES; in check_ptr_to_btf_access()
6380 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { in check_ptr_to_btf_access()
6383 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_ptr_to_btf_access()
6387 return -EACCES; in check_ptr_to_btf_access()
6390 if (reg->type & MEM_USER) { in check_ptr_to_btf_access()
6394 return -EACCES; in check_ptr_to_btf_access()
6397 if (reg->type & MEM_PERCPU) { in check_ptr_to_btf_access()
6401 return -EACCES; in check_ptr_to_btf_access()
6404 if (env->ops->btf_struct_access && !type_is_alloc(reg->type) && atype == BPF_WRITE) { in check_ptr_to_btf_access()
6405 if (!btf_is_kernel(reg->btf)) { in check_ptr_to_btf_access()
6406 verbose(env, "verifier internal error: reg->btf must be kernel btf\n"); in check_ptr_to_btf_access()
6407 return -EFAULT; in check_ptr_to_btf_access()
6409 ret = env->ops->btf_struct_access(&env->log, reg, off, size); in check_ptr_to_btf_access()
6415 if (atype != BPF_READ && !type_is_ptr_alloc_obj(reg->type)) { in check_ptr_to_btf_access()
6417 return -EACCES; in check_ptr_to_btf_access()
6420 if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) && in check_ptr_to_btf_access()
6421 !(reg->type & MEM_RCU) && !reg->ref_obj_id) { in check_ptr_to_btf_access()
6422 verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n"); in check_ptr_to_btf_access()
6423 return -EFAULT; in check_ptr_to_btf_access()
6426 ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag, &field_name); in check_ptr_to_btf_access()
6435 } else if (type_flag(reg->type) & PTR_UNTRUSTED) { in check_ptr_to_btf_access()
6446 * 'cgroups' pointer is untrusted if task->cgroups dereference in check_ptr_to_btf_access()
6448 * section. In a non-sleepable program it's trusted while in RCU CS (aka MEM_RCU). in check_ptr_to_btf_access()
6451 * A regular RCU-protected pointer with __rcu tag can also be deemed in check_ptr_to_btf_access()
6456 } else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) { in check_ptr_to_btf_access()
6470 /* keep as-is */ in check_ptr_to_btf_access()
6491 mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag); in check_ptr_to_btf_access()
6503 struct bpf_map *map = reg->map_ptr; in check_ptr_to_map_access()
6513 return -ENOTSUPP; in check_ptr_to_map_access()
6516 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { in check_ptr_to_map_access()
6518 map->map_type); in check_ptr_to_map_access()
6519 return -ENOTSUPP; in check_ptr_to_map_access()
6522 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); in check_ptr_to_map_access()
6523 tname = btf_name_by_offset(btf_vmlinux, t->name_off); in check_ptr_to_map_access()
6525 if (!env->allow_ptr_leaks) { in check_ptr_to_map_access()
6529 return -EPERM; in check_ptr_to_map_access()
6535 return -EACCES; in check_ptr_to_map_access()
6540 return -EACCES; in check_ptr_to_map_access()
6545 mark_btf_ld_reg(env, &map_reg, 0, PTR_TO_BTF_ID, btf_vmlinux, *map->ops->map_btf_id, 0); in check_ptr_to_map_access()
6546 ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag, NULL); in check_ptr_to_map_access()
6557 * maximum valid offset is -1.
6559 * The minimum valid offset is -MAX_BPF_STACK for writes, and
6560 * -state->allocated_stack for reads.
6569 if (t == BPF_WRITE || env->allow_uninit_stack) in check_stack_slot_within_bounds()
6570 min_valid_off = -MAX_BPF_STACK; in check_stack_slot_within_bounds()
6572 min_valid_off = -state->allocated_stack; in check_stack_slot_within_bounds()
6574 if (off < min_valid_off || off > -1) in check_stack_slot_within_bounds()
6575 return -EACCES; in check_stack_slot_within_bounds()
6582 * 'off' includes 'regno->off', but not its dynamic part (if any).
6604 if (tnum_is_const(reg->var_off)) { in check_stack_access_within_bounds()
6605 min_off = (s64)reg->var_off.value + off; in check_stack_access_within_bounds()
6608 if (reg->smax_value >= BPF_MAX_VAR_OFF || in check_stack_access_within_bounds()
6609 reg->smin_value <= -BPF_MAX_VAR_OFF) { in check_stack_access_within_bounds()
6610 verbose(env, "invalid unbounded variable-offset%s stack R%d\n", in check_stack_access_within_bounds()
6612 return -EACCES; in check_stack_access_within_bounds()
6614 min_off = reg->smin_value + off; in check_stack_access_within_bounds()
6615 max_off = reg->smax_value + off + access_size; in check_stack_access_within_bounds()
6620 err = -EINVAL; /* out of stack access into non-negative offsets */ in check_stack_access_within_bounds()
6623 if (tnum_is_const(reg->var_off)) { in check_stack_access_within_bounds()
6629 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_access_within_bounds()
6630 verbose(env, "invalid variable-offset%s stack R%d var_off=%s off=%d size=%d\n", in check_stack_access_within_bounds()
6637 * size is -min_off, not -min_off+1. in check_stack_access_within_bounds()
6639 return grow_stack_state(env, state, -min_off /* size */); in check_stack_access_within_bounds()
6645 * if t==write && value_regno==-1, some unknown value is stored into memory
6646 * if t==read && value_regno==-1, don't care what we read from memory
6660 /* alignment checks will add in reg->off themselves */ in check_mem_access()
6665 /* for access checks, reg->off is just part of off */ in check_mem_access()
6666 off += reg->off; in check_mem_access()
6668 if (reg->type == PTR_TO_MAP_KEY) { in check_mem_access()
6671 return -EACCES; in check_mem_access()
6675 reg->map_ptr->key_size, false); in check_mem_access()
6680 } else if (reg->type == PTR_TO_MAP_VALUE) { in check_mem_access()
6686 return -EACCES; in check_mem_access()
6694 if (tnum_is_const(reg->var_off)) in check_mem_access()
6695 kptr_field = btf_record_find(reg->map_ptr->record, in check_mem_access()
6696 off + reg->var_off.value, BPF_KPTR); in check_mem_access()
6700 struct bpf_map *map = reg->map_ptr; in check_mem_access()
6702 /* if map is read-only, track its contents as scalars */ in check_mem_access()
6703 if (tnum_is_const(reg->var_off) && in check_mem_access()
6705 map->ops->map_direct_value_addr) { in check_mem_access()
6706 int map_off = off + reg->var_off.value; in check_mem_access()
6720 } else if (base_type(reg->type) == PTR_TO_MEM) { in check_mem_access()
6721 bool rdonly_mem = type_is_rdonly_mem(reg->type); in check_mem_access()
6723 if (type_may_be_null(reg->type)) { in check_mem_access()
6725 reg_type_str(env, reg->type)); in check_mem_access()
6726 return -EACCES; in check_mem_access()
6731 regno, reg_type_str(env, reg->type)); in check_mem_access()
6732 return -EACCES; in check_mem_access()
6738 return -EACCES; in check_mem_access()
6742 reg->mem_size, false); in check_mem_access()
6745 } else if (reg->type == PTR_TO_CTX) { in check_mem_access()
6753 return -EACCES; in check_mem_access()
6775 regs[value_regno].id = ++env->id_gen; in check_mem_access()
6779 * a sub-register. in check_mem_access()
6790 } else if (reg->type == PTR_TO_STACK) { in check_mem_access()
6805 return -EACCES; in check_mem_access()
6811 return -EACCES; in check_mem_access()
6816 } else if (reg->type == PTR_TO_FLOW_KEYS) { in check_mem_access()
6821 return -EACCES; in check_mem_access()
6827 } else if (type_is_sk_pointer(reg->type)) { in check_mem_access()
6830 regno, reg_type_str(env, reg->type)); in check_mem_access()
6831 return -EACCES; in check_mem_access()
6836 } else if (reg->type == PTR_TO_TP_BUFFER) { in check_mem_access()
6840 } else if (base_type(reg->type) == PTR_TO_BTF_ID && in check_mem_access()
6841 !type_may_be_null(reg->type)) { in check_mem_access()
6844 } else if (reg->type == CONST_PTR_TO_MAP) { in check_mem_access()
6847 } else if (base_type(reg->type) == PTR_TO_BUF) { in check_mem_access()
6848 bool rdonly_mem = type_is_rdonly_mem(reg->type); in check_mem_access()
6854 regno, reg_type_str(env, reg->type)); in check_mem_access()
6855 return -EACCES; in check_mem_access()
6857 max_access = &env->prog->aux->max_rdonly_access; in check_mem_access()
6859 max_access = &env->prog->aux->max_rdwr_access; in check_mem_access()
6869 reg_type_str(env, reg->type)); in check_mem_access()
6870 return -EACCES; in check_mem_access()
6876 /* b/h/w load zero-extends, mark upper bits as known 0 */ in check_mem_access()
6889 switch (insn->imm) { in check_atomic()
6902 verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm); in check_atomic()
6903 return -EINVAL; in check_atomic()
6906 if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) { in check_atomic()
6908 return -EINVAL; in check_atomic()
6912 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_atomic()
6917 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_atomic()
6921 if (insn->imm == BPF_CMPXCHG) { in check_atomic()
6931 return -EACCES; in check_atomic()
6935 if (is_pointer_value(env, insn->src_reg)) { in check_atomic()
6936 verbose(env, "R%d leaks addr into mem\n", insn->src_reg); in check_atomic()
6937 return -EACCES; in check_atomic()
6940 if (is_ctx_reg(env, insn->dst_reg) || in check_atomic()
6941 is_pkt_reg(env, insn->dst_reg) || in check_atomic()
6942 is_flow_key_reg(env, insn->dst_reg) || in check_atomic()
6943 is_sk_reg(env, insn->dst_reg)) { in check_atomic()
6945 insn->dst_reg, in check_atomic()
6946 reg_type_str(env, reg_state(env, insn->dst_reg)->type)); in check_atomic()
6947 return -EACCES; in check_atomic()
6950 if (insn->imm & BPF_FETCH) { in check_atomic()
6951 if (insn->imm == BPF_CMPXCHG) in check_atomic()
6954 load_reg = insn->src_reg; in check_atomic()
6964 load_reg = -1; in check_atomic()
6970 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
6971 BPF_SIZE(insn->code), BPF_READ, -1, true, false); in check_atomic()
6973 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
6974 BPF_SIZE(insn->code), BPF_READ, load_reg, in check_atomic()
6980 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
6981 BPF_SIZE(insn->code), BPF_WRITE, -1, true, false); in check_atomic()
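
For context, the most common way these instructions are produced from BPF C is an atomic add on map-value memory. A minimal illustrative snippet (assumes libbpf's bpf_helpers.h; not part of this file):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u64);
} counters SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_getpid")
int count_getpid(void *ctx)
{
        __u32 key = 0;
        __u64 *val = bpf_map_lookup_elem(&counters, &key);

        if (val)
                /* compiles to a BPF_ATOMIC add; check_atomic() verifies the
                 * operands and that the target is plain memory, not ctx,
                 * packet or socket state.
                 */
                __sync_fetch_and_add(val, 1);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
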
6992 * 'off' includes 'regno->off', but not its dynamic part (if any).
7008 * read-only. in check_stack_range_initialized()
7013 verbose(env, "invalid zero-sized read\n"); in check_stack_range_initialized()
7014 return -EACCES; in check_stack_range_initialized()
7033 if (tnum_is_const(reg->var_off)) { in check_stack_range_initialized()
7034 min_off = max_off = reg->var_off.value + off; in check_stack_range_initialized()
7041 if (!env->bypass_spec_v1) { in check_stack_range_initialized()
7044 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_range_initialized()
7047 return -EACCES; in check_stack_range_initialized()
7055 if (meta && meta->raw_mode) in check_stack_range_initialized()
7058 min_off = reg->smin_value + off; in check_stack_range_initialized()
7059 max_off = reg->smax_value + off; in check_stack_range_initialized()
7062 if (meta && meta->raw_mode) { in check_stack_range_initialized()
7077 int stack_off = -i - 1; in check_stack_range_initialized()
7081 if (state->allocated_stack <= stack_off) in check_stack_range_initialized()
7083 if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) { in check_stack_range_initialized()
7085 return -EACCES; in check_stack_range_initialized()
7088 meta->access_size = access_size; in check_stack_range_initialized()
7089 meta->regno = regno; in check_stack_range_initialized()
7096 slot = -i - 1; in check_stack_range_initialized()
7098 if (state->allocated_stack <= slot) { in check_stack_range_initialized()
7100 return -EFAULT; in check_stack_range_initialized()
7103 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_range_initialized()
7107 (*stype == STACK_INVALID && env->allow_uninit_stack)) { in check_stack_range_initialized()
7115 if (is_spilled_reg(&state->stack[spi]) && in check_stack_range_initialized()
7116 (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || in check_stack_range_initialized()
7117 env->allow_ptr_leaks)) { in check_stack_range_initialized()
7119 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); in check_stack_range_initialized()
7121 scrub_spilled_slot(&state->stack[spi].slot_type[j]); in check_stack_range_initialized()
7126 if (tnum_is_const(reg->var_off)) { in check_stack_range_initialized()
7128 err_extra, regno, min_off, i - min_off, access_size); in check_stack_range_initialized()
7132 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_range_initialized()
7134 err_extra, regno, tn_buf, i - min_off, access_size); in check_stack_range_initialized()
7136 return -EACCES; in check_stack_range_initialized()
7138 /* reading any byte out of 8-byte 'spill_slot' will cause in check_stack_range_initialized()
7141 mark_reg_read(env, &state->stack[spi].spilled_ptr, in check_stack_range_initialized()
7142 state->stack[spi].spilled_ptr.parent, in check_stack_range_initialized()
7160 switch (base_type(reg->type)) { in check_helper_mem_access()
7163 return check_packet_access(env, regno, reg->off, access_size, in check_helper_mem_access()
7166 if (meta && meta->raw_mode) { in check_helper_mem_access()
7168 reg_type_str(env, reg->type)); in check_helper_mem_access()
7169 return -EACCES; in check_helper_mem_access()
7171 return check_mem_region_access(env, regno, reg->off, access_size, in check_helper_mem_access()
7172 reg->map_ptr->key_size, false); in check_helper_mem_access()
7174 if (check_map_access_type(env, regno, reg->off, access_size, in check_helper_mem_access()
7175 meta && meta->raw_mode ? BPF_WRITE : in check_helper_mem_access()
7177 return -EACCES; in check_helper_mem_access()
7178 return check_map_access(env, regno, reg->off, access_size, in check_helper_mem_access()
7181 if (type_is_rdonly_mem(reg->type)) { in check_helper_mem_access()
7182 if (meta && meta->raw_mode) { in check_helper_mem_access()
7184 reg_type_str(env, reg->type)); in check_helper_mem_access()
7185 return -EACCES; in check_helper_mem_access()
7188 return check_mem_region_access(env, regno, reg->off, in check_helper_mem_access()
7189 access_size, reg->mem_size, in check_helper_mem_access()
7192 if (type_is_rdonly_mem(reg->type)) { in check_helper_mem_access()
7193 if (meta && meta->raw_mode) { in check_helper_mem_access()
7195 reg_type_str(env, reg->type)); in check_helper_mem_access()
7196 return -EACCES; in check_helper_mem_access()
7199 max_access = &env->prog->aux->max_rdonly_access; in check_helper_mem_access()
7201 max_access = &env->prog->aux->max_rdwr_access; in check_helper_mem_access()
7203 return check_buffer_access(env, reg, regno, reg->off, in check_helper_mem_access()
7209 regno, reg->off, access_size, in check_helper_mem_access()
7212 return check_ptr_to_btf_access(env, regs, regno, reg->off, in check_helper_mem_access()
7213 access_size, BPF_READ, -1); in check_helper_mem_access()
7220 if (!env->ops->convert_ctx_access) { in check_helper_mem_access()
7221 enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ; in check_helper_mem_access()
7222 int offset = access_size - 1; in check_helper_mem_access()
7224 /* Allow zero-byte read from PTR_TO_CTX */ in check_helper_mem_access()
7226 return zero_size_allowed ? 0 : -EACCES; in check_helper_mem_access()
7228 return check_mem_access(env, env->insn_idx, regno, offset, BPF_B, in check_helper_mem_access()
7229 atype, -1, false, false); in check_helper_mem_access()
7234 /* Allow zero-byte read from NULL, regardless of pointer type */ in check_helper_mem_access()
7240 reg_type_str(env, reg->type)); in check_helper_mem_access()
7242 return -EACCES; in check_helper_mem_access()
7249 * @regno is the register containing the access size. regno-1 is the register
7267 meta->msize_max_value = reg->umax_value; in check_mem_size_reg()
7272 if (!tnum_is_const(reg->var_off)) in check_mem_size_reg()
7280 if (reg->smin_value < 0) { in check_mem_size_reg()
7283 return -EACCES; in check_mem_size_reg()
7286 if (reg->umin_value == 0 && !zero_size_allowed) { in check_mem_size_reg()
7287 verbose(env, "R%d invalid zero-sized read: u64=[%lld,%lld]\n", in check_mem_size_reg()
7288 regno, reg->umin_value, reg->umax_value); in check_mem_size_reg()
7289 return -EACCES; in check_mem_size_reg()
7292 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { in check_mem_size_reg()
7295 return -EACCES; in check_mem_size_reg()
7297 err = check_helper_mem_access(env, regno - 1, in check_mem_size_reg()
7298 reg->umax_value, in check_mem_size_reg()
7308 bool may_be_null = type_may_be_null(reg->type); in check_mem_reg()
7340 struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1]; in check_kfunc_mem_size_reg()
7341 bool may_be_null = type_may_be_null(mem_reg->type); in check_kfunc_mem_size_reg()
7368 * Two bpf_map_lookups (even with the same key) will have different reg->id.
7369 * Two separate bpf_obj_new will also have different reg->id.
7371 * clears reg->id after value_or_null->value transition, since the verifier only
7375 * reg->id > 0 after value_or_null->value transition. By doing so
7380 * deadlocks.
7384 * cur_state->active_lock remembers which map value element or allocated
7391 struct bpf_verifier_state *cur = env->cur_state; in process_spin_lock()
7392 bool is_const = tnum_is_const(reg->var_off); in process_spin_lock()
7393 u64 val = reg->var_off.value; in process_spin_lock()
7402 return -EINVAL; in process_spin_lock()
7404 if (reg->type == PTR_TO_MAP_VALUE) { in process_spin_lock()
7405 map = reg->map_ptr; in process_spin_lock()
7406 if (!map->btf) { in process_spin_lock()
7409 map->name); in process_spin_lock()
7410 return -EINVAL; in process_spin_lock()
7413 btf = reg->btf; in process_spin_lock()
7419 map ? map->name : "kptr"); in process_spin_lock()
7420 return -EINVAL; in process_spin_lock()
7422 if (rec->spin_lock_off != val + reg->off) { in process_spin_lock()
7424 val + reg->off, rec->spin_lock_off); in process_spin_lock()
7425 return -EINVAL; in process_spin_lock()
7428 if (cur->active_lock.ptr) { in process_spin_lock()
7431 return -EINVAL; in process_spin_lock()
7434 cur->active_lock.ptr = map; in process_spin_lock()
7436 cur->active_lock.ptr = btf; in process_spin_lock()
7437 cur->active_lock.id = reg->id; in process_spin_lock()
7446 if (!cur->active_lock.ptr) { in process_spin_lock()
7448 return -EINVAL; in process_spin_lock()
7450 if (cur->active_lock.ptr != ptr || in process_spin_lock()
7451 cur->active_lock.id != reg->id) { in process_spin_lock()
7453 return -EINVAL; in process_spin_lock()
7458 cur->active_lock.ptr = NULL; in process_spin_lock()
7459 cur->active_lock.id = 0; in process_spin_lock()
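/* Editor's sketch of the locking pattern process_spin_lock() enforces,
 * assuming an array map whose value embeds the lock:
 *
 *	struct val { int cnt; struct bpf_spin_lock lock; };
 *
 *	struct val *v = bpf_map_lookup_elem(&m, &key);
 *	if (!v) return 0;
 *	bpf_spin_lock(&v->lock);	// must point at the recorded lock offset
 *	v->cnt++;
 *	bpf_spin_unlock(&v->lock);	// must release the same element (same id)
 *
 * Taking a second lock before unlocking, or unlocking a different map
 * value, fails the checks above.
 */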
7468 bool is_const = tnum_is_const(reg->var_off); in process_timer_func()
7469 struct bpf_map *map = reg->map_ptr; in process_timer_func()
7470 u64 val = reg->var_off.value; in process_timer_func()
7476 return -EINVAL; in process_timer_func()
7478 if (!map->btf) { in process_timer_func()
7480 map->name); in process_timer_func()
7481 return -EINVAL; in process_timer_func()
7483 if (!btf_record_has_field(map->record, BPF_TIMER)) { in process_timer_func()
7484 verbose(env, "map '%s' has no valid bpf_timer\n", map->name); in process_timer_func()
7485 return -EINVAL; in process_timer_func()
7487 if (map->record->timer_off != val + reg->off) { in process_timer_func()
7489 val + reg->off, map->record->timer_off); in process_timer_func()
7490 return -EINVAL; in process_timer_func()
7492 if (meta->map_ptr) { in process_timer_func()
7494 return -EFAULT; in process_timer_func()
7496 meta->map_uid = reg->map_uid; in process_timer_func()
7497 meta->map_ptr = map; in process_timer_func()
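/* Editor's sketch of the bpf_timer usage process_timer_func() expects:
 * the timer must sit at its BTF-recorded offset inside a map value.
 * "tmap", "elem" and "timer_cb" are illustrative names only.
 *
 *	struct elem { struct bpf_timer t; };
 *
 *	struct elem *e = bpf_map_lookup_elem(&tmap, &key);
 *	if (!e) return 0;
 *	bpf_timer_init(&e->t, &tmap, CLOCK_MONOTONIC);
 *	bpf_timer_set_callback(&e->t, timer_cb);
 *	bpf_timer_start(&e->t, 1000000, 0);	// fire in 1ms
 */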
7505 struct bpf_map *map_ptr = reg->map_ptr; in process_kptr_func()
7509 if (!tnum_is_const(reg->var_off)) { in process_kptr_func()
7513 return -EINVAL; in process_kptr_func()
7515 if (!map_ptr->btf) { in process_kptr_func()
7517 map_ptr->name); in process_kptr_func()
7518 return -EINVAL; in process_kptr_func()
7520 if (!btf_record_has_field(map_ptr->record, BPF_KPTR)) { in process_kptr_func()
7521 verbose(env, "map '%s' has no valid kptr\n", map_ptr->name); in process_kptr_func()
7522 return -EINVAL; in process_kptr_func()
7525 meta->map_ptr = map_ptr; in process_kptr_func()
7526 kptr_off = reg->off + reg->var_off.value; in process_kptr_func()
7527 kptr_field = btf_record_find(map_ptr->record, kptr_off, BPF_KPTR); in process_kptr_func()
7530 return -EACCES; in process_kptr_func()
7532 if (kptr_field->type != BPF_KPTR_REF && kptr_field->type != BPF_KPTR_PERCPU) { in process_kptr_func()
7534 return -EACCES; in process_kptr_func()
7536 meta->kptr_field = kptr_field; in process_kptr_func()
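/* Editor's sketch of the kptr layout process_kptr_func() checks, assuming
 * a map value that carries a referenced kptr field:
 *
 *	struct map_val { struct task_struct __kptr *task; };
 *
 *	old = bpf_kptr_xchg(&v->task, new);
 *
 * &v->task must hit the exact offset recorded in the map's BTF record; a
 * pointer at any other offset, or into a map without BTF, is rejected.
 */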
7555 * reg->type and the memory's in reg->dynptr.type), but there is no support for
7576 return -EFAULT; in process_dynptr_func()
7579 /* MEM_UNINIT - Points to memory that is an appropriate candidate for in process_dynptr_func()
7586	 * MEM_RDONLY - Points to an initialized bpf_dynptr that will not be  in process_dynptr_func()
7590	 * None       - Points to an initialized dynptr that can be mutated and  in process_dynptr_func()
7599 return -EINVAL; in process_dynptr_func()
7605 i, BPF_DW, BPF_WRITE, -1, false, false); in process_dynptr_func()
7612 /* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */ in process_dynptr_func()
7613 if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) { in process_dynptr_func()
7615 return -EINVAL; in process_dynptr_func()
7622 return -EINVAL; in process_dynptr_func()
7630 return -EINVAL; in process_dynptr_func()
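/* Editor's sketch of the dynptr argument modes described above, assuming a
 * stack-allocated dynptr:
 *
 *	struct bpf_dynptr ptr;				// uninitialized slots
 *	char out[8];
 *
 *	bpf_dynptr_from_mem(buf, sizeof(buf), 0, &ptr);	// MEM_UNINIT arg: the
 *							// helper initializes it
 *	bpf_dynptr_read(out, sizeof(out), &ptr, 0, 0);	// MEM_RDONLY-style use
 *							// of the initialized dynptr
 */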
7642 return state->stack[spi].spilled_ptr.ref_obj_id; in iter_ref_obj_id()
7647 return meta->kfunc_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY); in is_iter_kfunc()
7652 return meta->kfunc_flags & KF_ITER_NEW; in is_iter_new_kfunc()
7657 return meta->kfunc_flags & KF_ITER_NEXT; in is_iter_next_kfunc()
7662 return meta->kfunc_flags & KF_ITER_DESTROY; in is_iter_destroy_kfunc()
7683 arg = &btf_params(meta->func_proto)[0]; in process_iter_arg()
7684 t = btf_type_skip_modifiers(meta->btf, arg->type, NULL); /* PTR */ in process_iter_arg()
7685 t = btf_type_skip_modifiers(meta->btf, t->type, &btf_id); /* STRUCT */ in process_iter_arg()
7686 nr_slots = t->size / BPF_REG_SIZE; in process_iter_arg()
7692 iter_type_str(meta->btf, btf_id), regno); in process_iter_arg()
7693 return -EINVAL; in process_iter_arg()
7698 i, BPF_DW, BPF_WRITE, -1, false, false); in process_iter_arg()
7703 err = mark_stack_slots_iter(env, meta, reg, insn_idx, meta->btf, btf_id, nr_slots); in process_iter_arg()
7708 err = is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots); in process_iter_arg()
7712 case -EINVAL: in process_iter_arg()
7714 iter_type_str(meta->btf, btf_id), regno); in process_iter_arg()
7716 case -EPROTO: in process_iter_arg()
7717 verbose(env, "expected an RCU CS when using %s\n", meta->func_name); in process_iter_arg()
7731 /* remember meta->iter info for process_iter_next_call() */ in process_iter_arg()
7732 meta->iter.spi = spi; in process_iter_arg()
7733 meta->iter.frameno = reg->frameno; in process_iter_arg()
7734 meta->ref_obj_id = iter_ref_obj_id(env, reg, spi); in process_iter_arg()
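/* Editor's sketch of the open-coded iterator pattern that process_iter_arg()
 * and process_iter_next_call() verify together, using the numbers iterator:
 *
 *	struct bpf_iter_num it;
 *	int *v, sum = 0;
 *
 *	bpf_iter_num_new(&it, 0, 100);		// KF_ITER_NEW: marks stack slots
 *	while ((v = bpf_iter_num_next(&it)))	// KF_ITER_NEXT: NULL/non-NULL fork
 *		sum += *v;
 *	bpf_iter_num_destroy(&it);		// KF_ITER_DESTROY: releases slots
 */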
7747 * stopped at insn_idx with callsites matching those in cur->frame.
7758 for (; sl; sl = sl->next) { in find_prev_entry()
7759 /* If st->branches != 0 state is a part of current DFS verification path, in find_prev_entry()
7762 st = &sl->state; in find_prev_entry()
7763 if (st->insn_idx == insn_idx && st->branches && same_callsites(st, cur) && in find_prev_entry()
7764 st->dfs_depth < cur->dfs_depth) in find_prev_entry()
7780 if (rold->type != SCALAR_VALUE) in maybe_widen_reg()
7782 if (rold->type != rcur->type) in maybe_widen_reg()
7784 if (rold->precise || rcur->precise || regs_exact(rold, rcur, idmap)) in maybe_widen_reg()
7797 for (fr = old->curframe; fr >= 0; fr--) { in widen_imprecise_scalars()
7798 fold = old->frame[fr]; in widen_imprecise_scalars()
7799 fcur = cur->frame[fr]; in widen_imprecise_scalars()
7803 &fold->regs[i], in widen_imprecise_scalars()
7804 &fcur->regs[i], in widen_imprecise_scalars()
7805 &env->idmap_scratch); in widen_imprecise_scalars()
7807 for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) { in widen_imprecise_scalars()
7808 if (!is_spilled_reg(&fold->stack[i]) || in widen_imprecise_scalars()
7809 !is_spilled_reg(&fcur->stack[i])) in widen_imprecise_scalars()
7813 &fold->stack[i].spilled_ptr, in widen_imprecise_scalars()
7814 &fcur->stack[i].spilled_ptr, in widen_imprecise_scalars()
7815 &env->idmap_scratch); in widen_imprecise_scalars()
7841 * (BPF_ITER_STATE_ACTIVE) and assume non-NULL return from iter_next(). We
7902 struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st; in process_iter_next_call()
7903 struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr; in process_iter_next_call()
7905 int iter_frameno = meta->iter.frameno; in process_iter_next_call()
7906 int iter_spi = meta->iter.spi; in process_iter_next_call()
7910 cur_iter = &env->cur_state->frame[iter_frameno]->stack[iter_spi].spilled_ptr; in process_iter_next_call()
7912 if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE && in process_iter_next_call()
7913 cur_iter->iter.state != BPF_ITER_STATE_DRAINED) { in process_iter_next_call()
7915 cur_iter->iter.state, iter_state_str(cur_iter->iter.state)); in process_iter_next_call()
7916 return -EFAULT; in process_iter_next_call()
7919 if (cur_iter->iter.state == BPF_ITER_STATE_ACTIVE) { in process_iter_next_call()
7923 if (!cur_st->parent || cur_st->parent->insn_idx != insn_idx || in process_iter_next_call()
7924 !same_callsites(cur_st->parent, cur_st)) { in process_iter_next_call()
7926 return -EFAULT; in process_iter_next_call()
7928 /* Note cur_st->parent in the call below, it is necessary to skip in process_iter_next_call()
7932 prev_st = find_prev_entry(env, cur_st->parent, insn_idx); in process_iter_next_call()
7936 return -ENOMEM; in process_iter_next_call()
7938 queued_iter = &queued_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr; in process_iter_next_call()
7939 queued_iter->iter.state = BPF_ITER_STATE_ACTIVE; in process_iter_next_call()
7940 queued_iter->iter.depth++; in process_iter_next_call()
7944 queued_fr = queued_st->frame[queued_st->curframe]; in process_iter_next_call()
7945 mark_ptr_not_null_reg(&queued_fr->regs[BPF_REG_0]); in process_iter_next_call()
7950 cur_iter->iter.state = BPF_ITER_STATE_DRAINED; in process_iter_next_call()
7951 __mark_reg_const_zero(env, &cur_fr->regs[BPF_REG_0]); in process_iter_next_call()
7979 return -EINVAL; in int_ptr_type_to_size()
7986 if (!meta->map_ptr) { in resolve_map_arg_type()
7988 verbose(env, "invalid map_ptr to access map->type\n"); in resolve_map_arg_type()
7989 return -EACCES; in resolve_map_arg_type()
7992 switch (meta->map_ptr->map_type) { in resolve_map_arg_type()
7999 return -EINVAL; in resolve_map_arg_type()
8003 if (meta->func_id == BPF_FUNC_map_peek_elem) in resolve_map_arg_type()
8136 enum bpf_reg_type expected, type = reg->type; in check_reg_type()
8143 return -EFAULT; in check_reg_type()
8165 if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type)) { in check_reg_type()
8170 for (i = 0; i < ARRAY_SIZE(compatible->types); i++) { in check_reg_type()
8171 expected = compatible->types[i]; in check_reg_type()
8179 verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type)); in check_reg_type()
8181 verbose(env, "%s, ", reg_type_str(env, compatible->types[j])); in check_reg_type()
8182 verbose(env, "%s\n", reg_type_str(env, compatible->types[j])); in check_reg_type()
8183 return -EACCES; in check_reg_type()
8186 if (base_type(reg->type) != PTR_TO_BTF_ID) in check_reg_type()
8193 func_id_name(meta->func_id), in check_reg_type()
8194 regno, reg_type_str(env, reg->type)); in check_reg_type()
8195 return -EACCES; in check_reg_type()
8200 switch ((int)reg->type) { in check_reg_type()
8212 meta->func_id != BPF_FUNC_sk_release; in check_reg_type()
8214 if (type_may_be_null(reg->type) && in check_reg_type()
8217 return -EACCES; in check_reg_type()
8221 if (!compatible->btf_id) { in check_reg_type()
8223 return -EFAULT; in check_reg_type()
8225 arg_btf_id = compatible->btf_id; in check_reg_type()
8228 if (meta->func_id == BPF_FUNC_kptr_xchg) { in check_reg_type()
8229 if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) in check_reg_type()
8230 return -EACCES; in check_reg_type()
8234 verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n", in check_reg_type()
8236 return -EACCES; in check_reg_type()
8239 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, in check_reg_type()
8243 regno, btf_type_name(reg->btf, reg->btf_id), in check_reg_type()
8245 return -EACCES; in check_reg_type()
8252 if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock && in check_reg_type()
8253 meta->func_id != BPF_FUNC_kptr_xchg) { in check_reg_type()
8255 return -EFAULT; in check_reg_type()
8257 if (meta->func_id == BPF_FUNC_kptr_xchg) { in check_reg_type()
8258 if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) in check_reg_type()
8259 return -EACCES; in check_reg_type()
8269 return -EFAULT; in check_reg_type()
8295 u32 type = reg->type; in check_func_arg_reg_off()
8301 * meta->release_regno. in check_func_arg_reg_off()
8317 if (reg->off) { in check_func_arg_reg_off()
8320 return -EINVAL; in check_func_arg_reg_off()
8350 * can be non-zero. This was already checked above. So pass in check_func_arg_reg_off()
8369 if (arg_type_is_dynptr(fn->arg_type[i])) { in get_dynptr_arg_reg()
8388 if (reg->type == CONST_PTR_TO_DYNPTR) in dynptr_id()
8389 return reg->id; in dynptr_id()
8393 return state->stack[spi].spilled_ptr.id; in dynptr_id()
8401 if (reg->type == CONST_PTR_TO_DYNPTR) in dynptr_ref_obj_id()
8402 return reg->ref_obj_id; in dynptr_ref_obj_id()
8406 return state->stack[spi].spilled_ptr.ref_obj_id; in dynptr_ref_obj_id()
8415 if (reg->type == CONST_PTR_TO_DYNPTR) in dynptr_get_type()
8416 return reg->dynptr.type; in dynptr_get_type()
8418 spi = __get_spi(reg->off); in dynptr_get_type()
8424 return state->stack[spi].spilled_ptr.dynptr.type; in dynptr_get_type()
8430 struct bpf_map *map = reg->map_ptr; in check_reg_const_str()
8436 if (reg->type != PTR_TO_MAP_VALUE) in check_reg_const_str()
8437 return -EINVAL; in check_reg_const_str()
8441 return -EACCES; in check_reg_const_str()
8444 if (!tnum_is_const(reg->var_off)) { in check_reg_const_str()
8446 return -EACCES; in check_reg_const_str()
8449 if (!map->ops->map_direct_value_addr) { in check_reg_const_str()
8451 return -EACCES; in check_reg_const_str()
8454 err = check_map_access(env, regno, reg->off, in check_reg_const_str()
8455 map->value_size - reg->off, false, in check_reg_const_str()
8460 map_off = reg->off + reg->var_off.value; in check_reg_const_str()
8461 err = map->ops->map_direct_value_addr(map, &map_addr, map_off); in check_reg_const_str()
8468 if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) { in check_reg_const_str()
8469 verbose(env, "string is not zero-terminated\n"); in check_reg_const_str()
8470 return -EINVAL; in check_reg_const_str()
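/* Editor's sketch of a format string check_reg_const_str() accepts, assuming
 * the compiler places the constant in a read-only (.rodata) map that supports
 * map_direct_value_addr:
 *
 *	static const char fmt[] = "pid=%d tgid=%d";
 *	u64 args[] = { pid, tgid };
 *
 *	bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 *
 * A format pointer with a non-constant offset, or into a map without direct
 * value access, is rejected by the checks above.
 */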
8482 enum bpf_arg_type arg_type = fn->arg_type[arg]; in check_func_arg()
8483 enum bpf_reg_type type = reg->type; in check_func_arg()
8498 return -EACCES; in check_func_arg()
8506 return -EACCES; in check_func_arg()
8524 arg_btf_id = fn->arg_btf_id[arg]; in check_func_arg()
8545 if (reg->type == PTR_TO_STACK) { in check_func_arg()
8547 if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) { in check_func_arg()
8549 return -EINVAL; in check_func_arg()
8553 return -EINVAL; in check_func_arg()
8555 } else if (!reg->ref_obj_id && !register_is_null(reg)) { in check_func_arg()
8558 return -EINVAL; in check_func_arg()
8560 if (meta->release_regno) { in check_func_arg()
8562 return -EFAULT; in check_func_arg()
8564 meta->release_regno = regno; in check_func_arg()
8567 if (reg->ref_obj_id) { in check_func_arg()
8568 if (meta->ref_obj_id) { in check_func_arg()
8570 regno, reg->ref_obj_id, in check_func_arg()
8571 meta->ref_obj_id); in check_func_arg()
8572 return -EFAULT; in check_func_arg()
8574 meta->ref_obj_id = reg->ref_obj_id; in check_func_arg()
8580 if (meta->map_ptr) { in check_func_arg()
8593 if (meta->map_ptr != reg->map_ptr || in check_func_arg()
8594 meta->map_uid != reg->map_uid) { in check_func_arg()
8597 meta->map_uid, reg->map_uid); in check_func_arg()
8598 return -EINVAL; in check_func_arg()
8601 meta->map_ptr = reg->map_ptr; in check_func_arg()
8602 meta->map_uid = reg->map_uid; in check_func_arg()
8606 * check that [key, key + map->key_size) are within in check_func_arg()
8609 if (!meta->map_ptr) { in check_func_arg()
8615 verbose(env, "invalid map_ptr to access map->key\n"); in check_func_arg()
8616 return -EACCES; in check_func_arg()
8619 meta->map_ptr->key_size, false, in check_func_arg()
8627 * check [value, value + map->value_size) validity in check_func_arg()
8629 if (!meta->map_ptr) { in check_func_arg()
8631 verbose(env, "invalid map_ptr to access map->value\n"); in check_func_arg()
8632 return -EACCES; in check_func_arg()
8634 meta->raw_mode = arg_type & MEM_UNINIT; in check_func_arg()
8636 meta->map_ptr->value_size, false, in check_func_arg()
8640 if (!reg->btf_id) { in check_func_arg()
8642 return -EACCES; in check_func_arg()
8644 meta->ret_btf = reg->btf; in check_func_arg()
8645 meta->ret_btf_id = reg->btf_id; in check_func_arg()
8650 return -EACCES; in check_func_arg()
8652 if (meta->func_id == BPF_FUNC_spin_lock) { in check_func_arg()
8656 } else if (meta->func_id == BPF_FUNC_spin_unlock) { in check_func_arg()
8662 return -EFAULT; in check_func_arg()
8671 meta->subprogno = reg->subprogno; in check_func_arg()
8677 meta->raw_mode = arg_type & MEM_UNINIT; in check_func_arg()
8680 fn->arg_size[arg], false, in check_func_arg()
8696 if (!tnum_is_const(reg->var_off)) { in check_func_arg()
8699 return -EACCES; in check_func_arg()
8701 meta->mem_size = reg->var_off.value; in check_func_arg()
8736 enum bpf_attach_type eatype = env->prog->expected_attach_type; in may_update_sockmap()
8737 enum bpf_prog_type type = resolve_prog_type(env->prog); in may_update_sockmap()
8768 return env->prog->jit_requested && in allow_tail_call_in_subprogs()
8779 switch (map->map_type) { in check_map_func_compatibility()
8825 /* Restrict bpf side of cpumap and xskmap, open when use-cases in check_map_func_compatibility()
8909 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) in check_map_func_compatibility()
8911 if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) { in check_map_func_compatibility()
8912 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); in check_map_func_compatibility()
8913 return -EINVAL; in check_map_func_compatibility()
8921 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) in check_map_func_compatibility()
8930 if (map->map_type != BPF_MAP_TYPE_RINGBUF) in check_map_func_compatibility()
8934 if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF) in check_map_func_compatibility()
8938 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) in check_map_func_compatibility()
8943 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) in check_map_func_compatibility()
8947 if (map->map_type != BPF_MAP_TYPE_DEVMAP && in check_map_func_compatibility()
8948 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && in check_map_func_compatibility()
8949 map->map_type != BPF_MAP_TYPE_CPUMAP && in check_map_func_compatibility()
8950 map->map_type != BPF_MAP_TYPE_XSKMAP) in check_map_func_compatibility()
8956 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) in check_map_func_compatibility()
8962 if (map->map_type != BPF_MAP_TYPE_SOCKHASH) in check_map_func_compatibility()
8966 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && in check_map_func_compatibility()
8967 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) in check_map_func_compatibility()
8971 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && in check_map_func_compatibility()
8972 map->map_type != BPF_MAP_TYPE_SOCKMAP && in check_map_func_compatibility()
8973 map->map_type != BPF_MAP_TYPE_SOCKHASH) in check_map_func_compatibility()
8977 if (map->map_type != BPF_MAP_TYPE_QUEUE && in check_map_func_compatibility()
8978 map->map_type != BPF_MAP_TYPE_STACK) in check_map_func_compatibility()
8983 if (map->map_type != BPF_MAP_TYPE_QUEUE && in check_map_func_compatibility()
8984 map->map_type != BPF_MAP_TYPE_STACK && in check_map_func_compatibility()
8985 map->map_type != BPF_MAP_TYPE_BLOOM_FILTER) in check_map_func_compatibility()
8989 if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY && in check_map_func_compatibility()
8990 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && in check_map_func_compatibility()
8991 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH) in check_map_func_compatibility()
8996 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) in check_map_func_compatibility()
9001 if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE) in check_map_func_compatibility()
9006 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) in check_map_func_compatibility()
9011 if (map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) in check_map_func_compatibility()
9021 map->map_type, func_id_name(func_id), func_id); in check_map_func_compatibility()
9022 return -EINVAL; in check_map_func_compatibility()
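/* Editor's illustration of the pairing enforced above: special-purpose
 * helpers only accept their matching map type, e.g. tail calls require a
 * BPF_MAP_TYPE_PROG_ARRAY:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 8);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, sizeof(u32));
 *	} jmp_table SEC(".maps");
 *
 *	bpf_tail_call(ctx, &jmp_table, idx);	// accepted
 *	bpf_tail_call(ctx, &hash_map, idx);	// rejected: wrong map_type
 */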
9029 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) in check_raw_mode_ok()
9031 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) in check_raw_mode_ok()
9033 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) in check_raw_mode_ok()
9035 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) in check_raw_mode_ok()
9037 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) in check_raw_mode_ok()
9049 bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE; in check_args_pair_invalid()
9050 bool has_size = fn->arg_size[arg] != 0; in check_args_pair_invalid()
9053 if (arg + 1 < ARRAY_SIZE(fn->arg_type)) in check_args_pair_invalid()
9054 is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]); in check_args_pair_invalid()
9056 if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM) in check_args_pair_invalid()
9069 if (arg_type_is_mem_size(fn->arg1_type) || in check_arg_pair_ok()
9084 for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) { in check_btf_id_ok()
9085 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID) in check_btf_id_ok()
9086 return !!fn->arg_btf_id[i]; in check_btf_id_ok()
9087 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_SPIN_LOCK) in check_btf_id_ok()
9088 return fn->arg_btf_id[i] == BPF_PTR_POISON; in check_btf_id_ok()
9089 if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] && in check_btf_id_ok()
9091 (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM || in check_btf_id_ok()
9092 !(fn->arg_type[i] & MEM_FIXED_SIZE))) in check_btf_id_ok()
9103 check_btf_id_ok(fn) ? 0 : -EINVAL; in check_func_proto()
9117 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ in clear_all_pkt_pointers()
9124 AT_PKT_END = -1,
9125 BEYOND_PKT_END = -2,
9130 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in mark_pkt_end()
9131 struct bpf_reg_state *reg = &state->regs[regn]; in mark_pkt_end()
9133 if (reg->type != PTR_TO_PACKET) in mark_pkt_end()
9144 reg->range = BEYOND_PKT_END; in mark_pkt_end()
9146 reg->range = AT_PKT_END; in mark_pkt_end()
9163 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ in release_reference()
9164 if (reg->ref_obj_id == ref_obj_id) in release_reference()
9176 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ in invalidate_non_owning_refs()
9177 if (type_is_non_owning_ref(reg->type)) in invalidate_non_owning_refs()
9187 /* after the call registers r0 - r5 were scratched */ in clear_caller_saved_regs()
9210 if (state->curframe + 1 >= MAX_CALL_FRAMES) { in setup_func_entry()
9212 state->curframe + 2); in setup_func_entry()
9213 return -E2BIG; in setup_func_entry()
9216 if (state->frame[state->curframe + 1]) { in setup_func_entry()
9218 state->curframe + 1); in setup_func_entry()
9219 return -EFAULT; in setup_func_entry()
9222 caller = state->frame[state->curframe]; in setup_func_entry()
9225 return -ENOMEM; in setup_func_entry()
9226 state->frame[state->curframe + 1] = callee; in setup_func_entry()
9228 /* callee cannot access r0, r6 - r9 for reading and has to write in setup_func_entry()
9235 state->curframe + 1 /* frameno within this callchain */, in setup_func_entry()
9244 state->curframe++; in setup_func_entry()
9250 state->frame[state->curframe + 1] = NULL; in setup_func_entry()
9259 struct bpf_verifier_log *log = &env->log; in btf_check_func_arg_match()
9270 for (i = 0; i < sub->arg_cnt; i++) { in btf_check_func_arg_match()
9273 struct bpf_subprog_arg_info *arg = &sub->args[i]; in btf_check_func_arg_match()
9275 if (arg->arg_type == ARG_ANYTHING) { in btf_check_func_arg_match()
9276 if (reg->type != SCALAR_VALUE) { in btf_check_func_arg_match()
9278 return -EINVAL; in btf_check_func_arg_match()
9280 } else if (arg->arg_type == ARG_PTR_TO_CTX) { in btf_check_func_arg_match()
9287 if (reg->type != PTR_TO_CTX) { in btf_check_func_arg_match()
9289 return -EINVAL; in btf_check_func_arg_match()
9291 } else if (base_type(arg->arg_type) == ARG_PTR_TO_MEM) { in btf_check_func_arg_match()
9295 if (check_mem_reg(env, reg, regno, arg->mem_size)) in btf_check_func_arg_match()
9296 return -EINVAL; in btf_check_func_arg_match()
9297 if (!(arg->arg_type & PTR_MAYBE_NULL) && (reg->type & PTR_MAYBE_NULL)) { in btf_check_func_arg_match()
9298 bpf_log(log, "arg#%d is expected to be non-NULL\n", i); in btf_check_func_arg_match()
9299 return -EINVAL; in btf_check_func_arg_match()
9301 } else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) { in btf_check_func_arg_match()
9302 ret = process_dynptr_func(env, regno, -1, arg->arg_type, 0); in btf_check_func_arg_match()
9307 i, arg->arg_type); in btf_check_func_arg_match()
9308 return -EFAULT; in btf_check_func_arg_match()
9317 * EFAULT - there is a verifier bug. Abort verification.
9318 * EINVAL - there is a type mismatch or BTF is not available.
9319 * 0 - BTF matches with what bpf_reg_state expects.
9325 struct bpf_prog *prog = env->prog; in btf_check_subprog_call()
9326 struct btf *btf = prog->aux->btf; in btf_check_subprog_call()
9330 if (!prog->aux->func_info) in btf_check_subprog_call()
9331 return -EINVAL; in btf_check_subprog_call()
9333 btf_id = prog->aux->func_info[subprog].type_id; in btf_check_subprog_call()
9335 return -EFAULT; in btf_check_subprog_call()
9337 if (prog->aux->func_info_aux[subprog].unreliable) in btf_check_subprog_call()
9338 return -EINVAL; in btf_check_subprog_call()
9346 prog->aux->func_info_aux[subprog].unreliable = true; in btf_check_subprog_call()
9354 struct bpf_verifier_state *state = env->cur_state, *callback_state; in push_callback_call()
9358 caller = state->frame[state->curframe]; in push_callback_call()
9359 err = btf_check_subprog_call(env, subprog, caller->regs); in push_callback_call()
9360 if (err == -EFAULT) in push_callback_call()
9367 env->subprog_info[subprog].is_cb = true; in push_callback_call()
9369 !is_sync_callback_calling_kfunc(insn->imm)) { in push_callback_call()
9370 verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n", in push_callback_call()
9371 func_id_name(insn->imm), insn->imm); in push_callback_call()
9372 return -EFAULT; in push_callback_call()
9374 !is_callback_calling_function(insn->imm)) { /* helper */ in push_callback_call()
9375 verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n", in push_callback_call()
9376 func_id_name(insn->imm), insn->imm); in push_callback_call()
9377 return -EFAULT; in push_callback_call()
9380 if (insn->code == (BPF_JMP | BPF_CALL) && in push_callback_call()
9381 insn->src_reg == 0 && in push_callback_call()
9382 insn->imm == BPF_FUNC_timer_set_callback) { in push_callback_call()
9386 env->subprog_info[subprog].is_async_cb = true; in push_callback_call()
9387 async_cb = push_async_cb(env, env->subprog_info[subprog].start, in push_callback_call()
9390 return -EFAULT; in push_callback_call()
9391 callee = async_cb->frame[0]; in push_callback_call()
9392 callee->async_entry_cnt = caller->async_entry_cnt + 1; in push_callback_call()
9405 callback_state = push_stack(env, env->subprog_info[subprog].start, insn_idx, false); in push_callback_call()
9407 return -ENOMEM; in push_callback_call()
9414 callback_state->callback_unroll_depth++; in push_callback_call()
9415 callback_state->frame[callback_state->curframe - 1]->callback_depth++; in push_callback_call()
9416 caller->callback_depth = 0; in push_callback_call()
9423 struct bpf_verifier_state *state = env->cur_state; in check_func_call()
9427 target_insn = *insn_idx + insn->imm + 1; in check_func_call()
9431 return -EFAULT; in check_func_call()
9434 caller = state->frame[state->curframe]; in check_func_call()
9435 err = btf_check_subprog_call(env, subprog, caller->regs); in check_func_call()
9436 if (err == -EFAULT) in check_func_call()
9450 subprog_aux(env, subprog)->called = true; in check_func_call()
9451 clear_caller_saved_regs(env, caller->regs); in check_func_call()
9453 /* All global functions return a 64-bit SCALAR_VALUE */ in check_func_call()
9454 mark_reg_unknown(env, caller->regs, BPF_REG_0); in check_func_call()
9455 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; in check_func_call()
9468 clear_caller_saved_regs(env, caller->regs); in check_func_call()
9471 *insn_idx = env->subprog_info[subprog].start - 1; in check_func_call()
9473 if (env->log.level & BPF_LOG_LEVEL) { in check_func_call()
9477 print_verifier_state(env, state->frame[state->curframe], true); in check_func_call()
9492 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; in map_set_for_each_callback_args()
9494 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; in map_set_for_each_callback_args()
9495 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in map_set_for_each_callback_args()
9496 callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr; in map_set_for_each_callback_args()
9498 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; in map_set_for_each_callback_args()
9499 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); in map_set_for_each_callback_args()
9500 callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr; in map_set_for_each_callback_args()
9503 callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3]; in map_set_for_each_callback_args()
9506 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in map_set_for_each_callback_args()
9516 /* copy r1 - r5 args that callee can access. The copy includes parent in set_callee_state()
9520 callee->regs[i] = caller->regs[i]; in set_callee_state()
9529 struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx]; in set_map_elem_callback_state()
9535 return -EINVAL; in set_map_elem_callback_state()
9538 map = BPF_MAP_PTR(insn_aux->map_ptr_state); in set_map_elem_callback_state()
9539 if (!map->ops->map_set_for_each_callback_args || in set_map_elem_callback_state()
9540 !map->ops->map_for_each_callback) { in set_map_elem_callback_state()
9542 return -ENOTSUPP; in set_map_elem_callback_state()
9545 err = map->ops->map_set_for_each_callback_args(env, caller, callee); in set_map_elem_callback_state()
9549 callee->in_callback_fn = true; in set_map_elem_callback_state()
9550 callee->callback_ret_range = retval_range(0, 1); in set_map_elem_callback_state()
9563 callee->regs[BPF_REG_1].type = SCALAR_VALUE; in set_loop_callback_state()
9564 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; in set_loop_callback_state()
9567 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_loop_callback_state()
9568 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_loop_callback_state()
9569 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_loop_callback_state()
9571 callee->in_callback_fn = true; in set_loop_callback_state()
9572 callee->callback_ret_range = retval_range(0, 1); in set_loop_callback_state()
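/* Editor's sketch of the bpf_loop() callback contract set up above: R1 is
 * the loop index (a scalar), R2 is the caller's callback_ctx, and the return
 * value must stay within [0, 1] (0 = continue, 1 = break):
 *
 *	static long loop_cb(u64 index, void *ctx)
 *	{
 *		struct loop_ctx *c = ctx;	// "loop_ctx" is illustrative
 *
 *		c->sum += index;
 *		return 0;
 *	}
 *	...
 *	bpf_loop(100, loop_cb, &lctx, 0);
 */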
9581 struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr; in set_timer_callback_state()
9586 callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; in set_timer_callback_state()
9587 __mark_reg_known_zero(&callee->regs[BPF_REG_1]); in set_timer_callback_state()
9588 callee->regs[BPF_REG_1].map_ptr = map_ptr; in set_timer_callback_state()
9590 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; in set_timer_callback_state()
9591 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in set_timer_callback_state()
9592 callee->regs[BPF_REG_2].map_ptr = map_ptr; in set_timer_callback_state()
9594 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; in set_timer_callback_state()
9595 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); in set_timer_callback_state()
9596 callee->regs[BPF_REG_3].map_ptr = map_ptr; in set_timer_callback_state()
9599 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_timer_callback_state()
9600 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_timer_callback_state()
9601 callee->in_async_callback_fn = true; in set_timer_callback_state()
9602 callee->callback_ret_range = retval_range(0, 1); in set_timer_callback_state()
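/* Editor's sketch of the async timer callback whose registers are prepared
 * above: R1 = the map, R2 = the key, R3 = the map value holding the timer,
 * and the return value is constrained to [0, 1]. "elem" and "timer_cb" are
 * illustrative names, matching the earlier timer sketch:
 *
 *	static int timer_cb(void *map, int *key, struct elem *val)
 *	{
 *		return 0;	// runs asynchronously, after the prog returned
 *	}
 *	...
 *	bpf_timer_set_callback(&e->t, timer_cb);
 */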
9616 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; in set_find_vma_callback_state()
9618 callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID; in set_find_vma_callback_state()
9619 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in set_find_vma_callback_state()
9620 callee->regs[BPF_REG_2].btf = btf_vmlinux; in set_find_vma_callback_state()
9621 callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA]; in set_find_vma_callback_state()
9624 callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4]; in set_find_vma_callback_state()
9627 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_find_vma_callback_state()
9628 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_find_vma_callback_state()
9629 callee->in_callback_fn = true; in set_find_vma_callback_state()
9630 callee->callback_ret_range = retval_range(0, 1); in set_find_vma_callback_state()
9643 __mark_reg_not_init(env, &callee->regs[BPF_REG_0]); in set_user_ringbuf_callback_state()
9644 mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL); in set_user_ringbuf_callback_state()
9645 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; in set_user_ringbuf_callback_state()
9648 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_user_ringbuf_callback_state()
9649 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_user_ringbuf_callback_state()
9650 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_user_ringbuf_callback_state()
9652 callee->in_callback_fn = true; in set_user_ringbuf_callback_state()
9653 callee->callback_ret_range = retval_range(0, 1); in set_user_ringbuf_callback_state()
9671 field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off, in set_rbtree_add_callback_state()
9673 if (!field || !field->graph_root.value_btf_id) in set_rbtree_add_callback_state()
9674 return -EFAULT; in set_rbtree_add_callback_state()
9676 mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root); in set_rbtree_add_callback_state()
9677 ref_set_non_owning(env, &callee->regs[BPF_REG_1]); in set_rbtree_add_callback_state()
9678 mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root); in set_rbtree_add_callback_state()
9679 ref_set_non_owning(env, &callee->regs[BPF_REG_2]); in set_rbtree_add_callback_state()
9681 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_rbtree_add_callback_state()
9682 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_rbtree_add_callback_state()
9683 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_rbtree_add_callback_state()
9684 callee->in_callback_fn = true; in set_rbtree_add_callback_state()
9685 callee->callback_ret_range = retval_range(0, 1); in set_rbtree_add_callback_state()
9697 struct bpf_verifier_state *state = env->cur_state; in in_rbtree_lock_required_cb()
9698 struct bpf_insn *insn = env->prog->insnsi; in in_rbtree_lock_required_cb()
9702 if (!state->curframe) in in_rbtree_lock_required_cb()
9705 callee = state->frame[state->curframe]; in in_rbtree_lock_required_cb()
9707 if (!callee->in_callback_fn) in in_rbtree_lock_required_cb()
9710 kfunc_btf_id = insn[callee->callsite].imm; in in_rbtree_lock_required_cb()
9716 return range.minval <= reg->smin_value && reg->smax_value <= range.maxval; in retval_range_within()
9721 struct bpf_verifier_state *state = env->cur_state, *prev_st; in prepare_func_exit()
9727 callee = state->frame[state->curframe]; in prepare_func_exit()
9728 r0 = &callee->regs[BPF_REG_0]; in prepare_func_exit()
9729 if (r0->type == PTR_TO_STACK) { in prepare_func_exit()
9737 return -EINVAL; in prepare_func_exit()
9740 caller = state->frame[state->curframe - 1]; in prepare_func_exit()
9741 if (callee->in_callback_fn) { in prepare_func_exit()
9742 if (r0->type != SCALAR_VALUE) { in prepare_func_exit()
9744 return -EACCES; in prepare_func_exit()
9748 err = mark_reg_read(env, r0, r0->parent, REG_LIVE_READ64); in prepare_func_exit()
9754 if (!retval_range_within(callee->callback_ret_range, r0)) { in prepare_func_exit()
9755 verbose_invalid_scalar(env, r0, callee->callback_ret_range, in prepare_func_exit()
9757 return -EINVAL; in prepare_func_exit()
9759 if (!calls_callback(env, callee->callsite)) { in prepare_func_exit()
9761 *insn_idx, callee->callsite); in prepare_func_exit()
9762 return -EFAULT; in prepare_func_exit()
9766 caller->regs[BPF_REG_0] = *r0; in prepare_func_exit()
9774 if (!callee->in_callback_fn) { in prepare_func_exit()
9785 in_callback_fn = callee->in_callback_fn; in prepare_func_exit()
9787 *insn_idx = callee->callsite; in prepare_func_exit()
9789 *insn_idx = callee->callsite + 1; in prepare_func_exit()
9791 if (env->log.level & BPF_LOG_LEVEL) { in prepare_func_exit()
9800 state->frame[state->curframe--] = NULL; in prepare_func_exit()
9805 * void cb(int idx, struct ctx *ctx) { ctx->i++; ... } in prepare_func_exit()
9838 ret_reg->smax_value = meta->msize_max_value; in do_refine_retval_range()
9839 ret_reg->s32_max_value = meta->msize_max_value; in do_refine_retval_range()
9840 ret_reg->smin_value = -MAX_ERRNO; in do_refine_retval_range()
9841 ret_reg->s32_min_value = -MAX_ERRNO; in do_refine_retval_range()
9845 ret_reg->umax_value = nr_cpu_ids - 1; in do_refine_retval_range()
9846 ret_reg->u32_max_value = nr_cpu_ids - 1; in do_refine_retval_range()
9847 ret_reg->smax_value = nr_cpu_ids - 1; in do_refine_retval_range()
9848 ret_reg->s32_max_value = nr_cpu_ids - 1; in do_refine_retval_range()
9849 ret_reg->umin_value = 0; in do_refine_retval_range()
9850 ret_reg->u32_min_value = 0; in do_refine_retval_range()
9851 ret_reg->smin_value = 0; in do_refine_retval_range()
9852 ret_reg->s32_min_value = 0; in do_refine_retval_range()
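/* Editor's illustration of the refinement above, assuming a helper whose
 * return value is bounded by its size argument:
 *
 *	char buf[64];
 *	long n = bpf_probe_read_kernel_str(buf, sizeof(buf), src);
 *	// the verifier now knows -MAX_ERRNO <= n <= 64, so a subsequent
 *	// "if (n > 0) ... buf[n - 1]" access can be proven in bounds
 *
 * The nr_cpu_ids branch above similarly bounds a CPU-id-returning helper
 * to [0, nr_cpu_ids - 1].
 */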
9864 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; in record_func_map()
9865 struct bpf_map *map = meta->map_ptr; in record_func_map()
9881 return -EINVAL; in record_func_map()
9884 /* In case of read-only, some additional restrictions in record_func_map()
9888 if ((map->map_flags & BPF_F_RDONLY_PROG) && in record_func_map()
9894 return -EACCES; in record_func_map()
9897 if (!BPF_MAP_PTR(aux->map_ptr_state)) in record_func_map()
9898 bpf_map_ptr_store(aux, meta->map_ptr, in record_func_map()
9899 !meta->map_ptr->bypass_spec_v1); in record_func_map()
9900 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr) in record_func_map()
9902 !meta->map_ptr->bypass_spec_v1); in record_func_map()
9910 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; in record_func_key()
9912 struct bpf_map *map = meta->map_ptr; in record_func_key()
9918 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) { in record_func_key()
9920 return -EINVAL; in record_func_key()
9924 val = reg->var_off.value; in record_func_key()
9925 max = map->max_entries; in record_func_key()
9949 if (!exception_exit && state->frameno && !state->in_callback_fn) in check_reference_leak()
9952 for (i = 0; i < state->acquired_refs; i++) { in check_reference_leak()
9953 if (!exception_exit && state->in_callback_fn && state->refs[i].callback_ref != state->frameno) in check_reference_leak()
9956 state->refs[i].id, state->refs[i].insn_idx); in check_reference_leak()
9959 return refs_lingering ? -EINVAL : 0; in check_reference_leak()
9967 struct bpf_map *fmt_map = fmt_reg->map_ptr; in check_bpf_snprintf_call()
9974 if (data_len_reg->var_off.value % 8) in check_bpf_snprintf_call()
9975 return -EINVAL; in check_bpf_snprintf_call()
9976 num_args = data_len_reg->var_off.value / 8; in check_bpf_snprintf_call()
9981 fmt_map_off = fmt_reg->off + fmt_reg->var_off.value; in check_bpf_snprintf_call()
9982 err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr, in check_bpf_snprintf_call()
9986 return -EFAULT; in check_bpf_snprintf_call()
10002 enum bpf_prog_type type = resolve_prog_type(env->prog); in check_get_func_ip()
10006 if (!bpf_prog_has_trampoline(env->prog)) { in check_get_func_ip()
10009 return -ENOTSUPP; in check_get_func_ip()
10018 return -ENOTSUPP; in check_get_func_ip()
10023 return &env->insn_aux_data[env->insn_idx]; in cur_aux()
10040 struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state; in update_loop_inline_state()
10042 if (!state->initialized) { in update_loop_inline_state()
10043 state->initialized = 1; in update_loop_inline_state()
10044 state->fit_for_inline = loop_flag_is_zero(env); in update_loop_inline_state()
10045 state->callback_subprogno = subprogno; in update_loop_inline_state()
10049 if (!state->fit_for_inline) in update_loop_inline_state()
10052 state->fit_for_inline = (loop_flag_is_zero(env) && in update_loop_inline_state()
10053 state->callback_subprogno == subprogno); in update_loop_inline_state()
10059 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_helper_call()
10071 func_id = insn->imm; in check_helper_call()
10075 return -EINVAL; in check_helper_call()
10078 if (env->ops->get_func_proto) in check_helper_call()
10079 fn = env->ops->get_func_proto(func_id, env->prog); in check_helper_call()
10083 return -EINVAL; in check_helper_call()
10086 /* eBPF programs must be GPL compatible to use GPL-ed functions */ in check_helper_call()
10087 if (!env->prog->gpl_compatible && fn->gpl_only) { in check_helper_call()
10088 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); in check_helper_call()
10089 return -EINVAL; in check_helper_call()
10092 if (fn->allowed && !fn->allowed(env->prog)) { in check_helper_call()
10094 return -EINVAL; in check_helper_call()
10097 if (!env->prog->aux->sleepable && fn->might_sleep) { in check_helper_call()
10098 verbose(env, "helper call might sleep in a non-sleepable prog\n"); in check_helper_call()
10099 return -EINVAL; in check_helper_call()
10103 changes_data = bpf_helper_changes_pkt_data(fn->func); in check_helper_call()
10104 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { in check_helper_call()
10107 return -EINVAL; in check_helper_call()
10111 meta.pkt_access = fn->pkt_access; in check_helper_call()
10120 if (env->cur_state->active_rcu_lock) { in check_helper_call()
10121 if (fn->might_sleep) { in check_helper_call()
10124 return -EINVAL; in check_helper_call()
10127 if (env->prog->aux->sleepable && is_storage_get_function(func_id)) in check_helper_call()
10128 env->insn_aux_data[insn_idx].storage_get_func_atomic = true; in check_helper_call()
10152 BPF_WRITE, -1, false, false); in check_helper_call()
10160 err = -EINVAL; in check_helper_call()
10165 if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) { in check_helper_call()
10168 return -EFAULT; in check_helper_call()
10179 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ in check_helper_call()
10180 if (reg->ref_obj_id == ref_obj_id) { in check_helper_call()
10181 if (in_rcu && (reg->type & MEM_ALLOC) && (reg->type & MEM_PERCPU)) { in check_helper_call()
10182 reg->ref_obj_id = 0; in check_helper_call()
10183 reg->type &= ~MEM_ALLOC; in check_helper_call()
10184 reg->type |= MEM_RCU; in check_helper_call()
10219 verbose(env, "get_local_storage() doesn't support non-zero flags\n"); in check_helper_call()
10220 return -EINVAL; in check_helper_call()
10246 if (cur_func(env)->callback_depth < regs[BPF_REG_1].umax_value) { in check_helper_call()
10250 cur_func(env)->callback_depth = 0; in check_helper_call()
10251 if (env->log.level & BPF_LOG_LEVEL2) in check_helper_call()
10253 env->cur_state->curframe); in check_helper_call()
10260 return -EACCES; in check_helper_call()
10265 env->prog->expected_attach_type == BPF_LSM_CGROUP) { in check_helper_call()
10266 if (!env->prog->aux->attach_func_proto->type) { in check_helper_call()
10271 return -EINVAL; in check_helper_call()
10282 return -EFAULT; in check_helper_call()
10287 return -EFAULT; in check_helper_call()
10291 return -EFAULT; in check_helper_call()
10318 return -EFAULT; in check_helper_call()
10322 return -EFAULT; in check_helper_call()
10338 if (reg->type & MEM_RCU) { in check_helper_call()
10339 type = btf_type_by_id(reg->btf, reg->btf_id); in check_helper_call()
10342 return -EFAULT; in check_helper_call()
10345 env->insn_aux_data[insn_idx].call_with_percpu_alloc_ptr = true; in check_helper_call()
10364 /* helper call returns 64-bit value. */ in check_helper_call()
10368 ret_type = fn->ret_type; in check_helper_call()
10389 return -EINVAL; in check_helper_call()
10395 btf_record_has_field(meta.map_ptr->record, BPF_SPIN_LOCK)) { in check_helper_call()
10396 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
10430 tname = btf_name_by_offset(meta.ret_btf, t->name_off); in check_helper_call()
10433 return -EINVAL; in check_helper_call()
10463 ret_btf = meta.kptr_field->kptr.btf; in check_helper_call()
10464 ret_btf_id = meta.kptr_field->kptr.btf_id; in check_helper_call()
10467 if (meta.kptr_field->type == BPF_KPTR_PERCPU) in check_helper_call()
10471 if (fn->ret_btf_id == BPF_PTR_POISON) { in check_helper_call()
10473 verbose(env, "func %s has non-overwritten BPF_PTR_POISON return type\n", in check_helper_call()
10475 return -EINVAL; in check_helper_call()
10478 ret_btf_id = *fn->ret_btf_id; in check_helper_call()
10484 return -EINVAL; in check_helper_call()
10493 return -EINVAL; in check_helper_call()
10497 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
10502 return -EFAULT; in check_helper_call()
10522 err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta); in check_helper_call()
10532 !env->prog->has_callchain_buf) { in check_helper_call()
10539 err = -ENOTSUPP; in check_helper_call()
10547 env->prog->has_callchain_buf = true; in check_helper_call()
10551 env->prog->call_get_stack = true; in check_helper_call()
10555 return -ENOTSUPP; in check_helper_call()
10556 env->prog->call_get_func_ip = true; in check_helper_call()
10574 reg->live |= REG_LIVE_WRITTEN; in mark_btf_func_reg_size()
10575 reg->subreg_def = reg_size == sizeof(u64) ? in mark_btf_func_reg_size()
10576 DEF_NOT_SUBREG : env->insn_idx + 1; in mark_btf_func_reg_size()
10581 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in mark_btf_func_reg_size()
10583 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32); in mark_btf_func_reg_size()
10590 return meta->kfunc_flags & KF_ACQUIRE; in is_kfunc_acquire()
10595 return meta->kfunc_flags & KF_RELEASE; in is_kfunc_release()
10600 return (meta->kfunc_flags & KF_TRUSTED_ARGS) || is_kfunc_release(meta); in is_kfunc_trusted_args()
10605 return meta->kfunc_flags & KF_SLEEPABLE; in is_kfunc_sleepable()
10610 return meta->kfunc_flags & KF_DESTRUCTIVE; in is_kfunc_destructive()
10615 return meta->kfunc_flags & KF_RCU; in is_kfunc_rcu()
10620 return meta->kfunc_flags & KF_RCU_PROTECTED; in is_kfunc_rcu_protected()
10631 param_name = btf_name_by_offset(btf, arg->name_off); in __kfunc_param_match_suffix()
10637 param_name += len - suffix_len; in __kfunc_param_match_suffix()
10647 t = btf_type_skip_modifiers(btf, arg->type, NULL); in is_kfunc_arg_mem_size()
10648 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) in is_kfunc_arg_mem_size()
10660 t = btf_type_skip_modifiers(btf, arg->type, NULL); in is_kfunc_arg_const_mem_size()
10661 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) in is_kfunc_arg_const_mem_size()
10714 param_name = btf_name_by_offset(btf, arg->name_off); in is_kfunc_arg_scalar_with_name()
10747 t = btf_type_skip_modifiers(btf, arg->type, NULL); in BTF_ID()
10752 t = btf_type_skip_modifiers(btf, t->type, &res_id); in BTF_ID()
10788 t = btf_type_resolve_func_ptr(btf, arg->type, NULL); in is_kfunc_arg_callback()
10810 member_type = btf_type_skip_modifiers(btf, member->type, NULL); in __btf_type_is_scalar_struct()
10822 if (!array->nelems) in __btf_type_is_scalar_struct()
10824 member_type = btf_type_skip_modifiers(btf, array->type, NULL); in __btf_type_is_scalar_struct()
10936 if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] && in BTF_ID()
10937 meta->arg_owning_ref) { in BTF_ID()
10941 return meta->kfunc_flags & KF_RET_NULL; in BTF_ID()
10946 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_lock]; in is_kfunc_bpf_rcu_read_lock()
10951 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock]; in is_kfunc_bpf_rcu_read_unlock()
10966 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) in get_kfunc_ptr_arg_type()
10974 if (btf_get_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno)) in get_kfunc_ptr_arg_type()
10977 if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
10980 if (is_kfunc_arg_refcounted_kptr(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
10983 if (is_kfunc_arg_dynptr(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
10989 if (is_kfunc_arg_list_head(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
10992 if (is_kfunc_arg_list_node(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
10995 if (is_kfunc_arg_rbtree_root(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
10998 if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11001 if (is_kfunc_arg_const_str(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11004 if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) { in get_kfunc_ptr_arg_type()
11007 meta->func_name, argno, btf_type_str(ref_t), ref_tname); in get_kfunc_ptr_arg_type()
11008 return -EINVAL; in get_kfunc_ptr_arg_type()
11013 if (is_kfunc_arg_callback(env, meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11016 if (is_kfunc_arg_nullable(meta->btf, &args[argno]) && register_is_null(reg)) in get_kfunc_ptr_arg_type()
11020 (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]) || in get_kfunc_ptr_arg_type()
11021 is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]))) in get_kfunc_ptr_arg_type()
11029 if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) && in get_kfunc_ptr_arg_type()
11033 return -EINVAL; in get_kfunc_ptr_arg_type()
11051 if (base_type(reg->type) == PTR_TO_BTF_ID) { in process_kf_arg_ptr_to_btf_id()
11052 reg_btf = reg->btf; in process_kf_arg_ptr_to_btf_id()
11053 reg_ref_id = reg->btf_id; in process_kf_arg_ptr_to_btf_id()
11056 reg_ref_id = *reg2btf_ids[base_type(reg->type)]; in process_kf_arg_ptr_to_btf_id()
11060 * or releasing a reference, or are no-cast aliases. We do _not_ in process_kf_arg_ptr_to_btf_id()
11084 (is_kfunc_release(meta) && reg->ref_obj_id) || in process_kf_arg_ptr_to_btf_id()
11085 btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id)) in process_kf_arg_ptr_to_btf_id()
11088 WARN_ON_ONCE(is_kfunc_trusted_args(meta) && reg->off); in process_kf_arg_ptr_to_btf_id()
11091 reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off); in process_kf_arg_ptr_to_btf_id()
11092 …if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type… in process_kf_arg_ptr_to_btf_id()
11094 meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1, in process_kf_arg_ptr_to_btf_id()
11096 return -EINVAL; in process_kf_arg_ptr_to_btf_id()
11103 struct bpf_verifier_state *state = env->cur_state; in ref_set_non_owning()
11106 if (!state->active_lock.ptr) { in ref_set_non_owning()
11108 return -EFAULT; in ref_set_non_owning()
11111 if (type_flag(reg->type) & NON_OWN_REF) { in ref_set_non_owning()
11113 return -EFAULT; in ref_set_non_owning()
11116 reg->type |= NON_OWN_REF; in ref_set_non_owning()
11117 if (rec->refcount_off >= 0) in ref_set_non_owning()
11118 reg->type |= MEM_RCU; in ref_set_non_owning()
11133 "owning -> non-owning conversion\n"); in ref_convert_owning_non_owning()
11134 return -EFAULT; in ref_convert_owning_non_owning()
11137 for (i = 0; i < state->acquired_refs; i++) { in ref_convert_owning_non_owning()
11138 if (state->refs[i].id != ref_obj_id) in ref_convert_owning_non_owning()
11144 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ in ref_convert_owning_non_owning()
11145 if (reg->ref_obj_id == ref_obj_id) { in ref_convert_owning_non_owning()
11146 reg->ref_obj_id = 0; in ref_convert_owning_non_owning()
11154 return -EFAULT; in ref_convert_owning_non_owning()
11166 * allocation, the verifier preserves a unique reg->id for it.
11178 * The active_lock.ptr in case of map values is the reg->map_ptr, and in case of
11179 * allocated objects is the reg->btf pointer.
11181 * The active_lock.id is non-unique for maps supporting direct_value_addr, as we
11192 * assigns a fresh reg->id to the lookup, so while lookups into distinct inner
11194 * will get different reg->id assigned to each lookup, hence different
11197 * In case of allocated objects, active_lock.ptr is the reg->btf, and the
11198 * reg->id is a unique ID preserved after the NULL pointer check on the pointer
11199 * returned from bpf_obj_new. Each allocation receives a new reg->id.
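/* Editor's sketch of the lock/collection pairing described above, assuming
 * a map value that carries both the lock and a list head (bpf_obj_new() and
 * __contains() follow the selftests' bpf_experimental.h conventions):
 *
 *	struct foo { int data; struct bpf_list_node node; };
 *	struct map_val {
 *		struct bpf_spin_lock lock;
 *		struct bpf_list_head head __contains(foo, node);
 *	};
 *
 *	v = bpf_map_lookup_elem(&m, &key);
 *	if (!v) return 0;
 *	f = bpf_obj_new(typeof(*f));
 *	if (!f) return 0;
 *	bpf_spin_lock(&v->lock);		// the lock owning this head
 *	bpf_list_push_back(&v->head, &f->node);	// ok: matching lock is held
 *	bpf_spin_unlock(&v->lock);
 *
 * Pushing to v->head while holding a different element's lock is rejected
 * by check_reg_allocation_locked() below.
 */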
11206 switch ((int)reg->type) { in check_reg_allocation_locked()
11208 ptr = reg->map_ptr; in check_reg_allocation_locked()
11211 ptr = reg->btf; in check_reg_allocation_locked()
11215 return -EFAULT; in check_reg_allocation_locked()
11217 id = reg->id; in check_reg_allocation_locked()
11219 if (!env->cur_state->active_lock.ptr) in check_reg_allocation_locked()
11220 return -EINVAL; in check_reg_allocation_locked()
11221 if (env->cur_state->active_lock.ptr != ptr || in check_reg_allocation_locked()
11222 env->cur_state->active_lock.id != id) { in check_reg_allocation_locked()
11224 return -EINVAL; in check_reg_allocation_locked()
11257 return bpf_pseudo_kfunc_call(insn) && insn->off == 0 && in is_bpf_throw_kfunc()
11258 insn->imm == special_kfunc_list[KF_bpf_throw]; in is_bpf_throw_kfunc()
11330 if (meta->btf != btf_vmlinux) { in __process_kf_arg_ptr_to_graph_root()
11332 return -EFAULT; in __process_kf_arg_ptr_to_graph_root()
11335 if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id)) in __process_kf_arg_ptr_to_graph_root()
11336 return -EFAULT; in __process_kf_arg_ptr_to_graph_root()
11339 if (!tnum_is_const(reg->var_off)) { in __process_kf_arg_ptr_to_graph_root()
11343 return -EINVAL; in __process_kf_arg_ptr_to_graph_root()
11347 head_off = reg->off + reg->var_off.value; in __process_kf_arg_ptr_to_graph_root()
11351 return -EINVAL; in __process_kf_arg_ptr_to_graph_root()
11357 rec->spin_lock_off, head_type_name); in __process_kf_arg_ptr_to_graph_root()
11358 return -EINVAL; in __process_kf_arg_ptr_to_graph_root()
11363 return -EFAULT; in __process_kf_arg_ptr_to_graph_root()
11374 &meta->arg_list_head.field); in process_kf_arg_ptr_to_list_head()
11382 &meta->arg_rbtree_root.field); in process_kf_arg_ptr_to_rbtree_root()
11398 if (meta->btf != btf_vmlinux) { in __process_kf_arg_ptr_to_graph_node()
11400 return -EFAULT; in __process_kf_arg_ptr_to_graph_node()
11403 if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id)) in __process_kf_arg_ptr_to_graph_node()
11404 return -EFAULT; in __process_kf_arg_ptr_to_graph_node()
11407 if (!tnum_is_const(reg->var_off)) { in __process_kf_arg_ptr_to_graph_node()
11411 return -EINVAL; in __process_kf_arg_ptr_to_graph_node()
11414 node_off = reg->off + reg->var_off.value; in __process_kf_arg_ptr_to_graph_node()
11416 if (!field || field->offset != node_off) { in __process_kf_arg_ptr_to_graph_node()
11418 return -EINVAL; in __process_kf_arg_ptr_to_graph_node()
11423 et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id); in __process_kf_arg_ptr_to_graph_node()
11424 t = btf_type_by_id(reg->btf, reg->btf_id); in __process_kf_arg_ptr_to_graph_node()
11425 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf, in __process_kf_arg_ptr_to_graph_node()
11426 field->graph_root.value_btf_id, true)) { in __process_kf_arg_ptr_to_graph_node()
11431 field->graph_root.node_offset, in __process_kf_arg_ptr_to_graph_node()
11432 btf_name_by_offset(field->graph_root.btf, et->name_off), in __process_kf_arg_ptr_to_graph_node()
11433 node_off, btf_name_by_offset(reg->btf, t->name_off)); in __process_kf_arg_ptr_to_graph_node()
11434 return -EINVAL; in __process_kf_arg_ptr_to_graph_node()
11436 meta->arg_btf = reg->btf; in __process_kf_arg_ptr_to_graph_node()
11437 meta->arg_btf_id = reg->btf_id; in __process_kf_arg_ptr_to_graph_node()
11439 if (node_off != field->graph_root.node_offset) { in __process_kf_arg_ptr_to_graph_node()
11442 field->graph_root.node_offset, in __process_kf_arg_ptr_to_graph_node()
11443 btf_name_by_offset(field->graph_root.btf, et->name_off)); in __process_kf_arg_ptr_to_graph_node()
11444 return -EINVAL; in __process_kf_arg_ptr_to_graph_node()
11456 &meta->arg_list_head.field); in process_kf_arg_ptr_to_list_node()
11465 &meta->arg_rbtree_root.field); in process_kf_arg_ptr_to_rbtree_node()
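/* Illustrative BPF-program-side sketch (hedged; modelled on the kernel
 * selftests and their bpf_experimental.h helpers such as __contains(),
 * bpf_obj_new() and bpf_list_push_back(); not part of verifier.c) of the
 * shape these graph root/node checks validate:
 *
 *	struct item {
 *		int val;
 *		struct bpf_list_node node;
 *	};
 *
 *	struct map_val {
 *		struct bpf_spin_lock lock;
 *		struct bpf_list_head head __contains(item, node);
 *	};
 *
 *	struct item *it = bpf_obj_new(typeof(*it));
 *	if (!it)
 *		return 0;
 *	bpf_spin_lock(&v->lock);
 *	bpf_list_push_back(&v->head, &it->node);
 *	bpf_spin_unlock(&v->lock);
 *
 * The head and its bpf_spin_lock live in the same map value (and the lock
 * must be held at the call, see check_reg_allocation_locked() above), while
 * the node argument must sit at the declared node offset inside an allocated
 * object of the declared type.
 */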
11470 * LSM hooks and iters (both sleepable and non-sleepable) are safe.
11476 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_css_task_iter_allowlist()
11482 if (env->prog->expected_attach_type == BPF_TRACE_ITER) in check_css_task_iter_allowlist()
11486 return env->prog->aux->sleepable; in check_css_task_iter_allowlist()
11493 const char *func_name = meta->func_name, *ref_tname; in check_kfunc_args()
11494 const struct btf *btf = meta->btf; in check_kfunc_args()
11500 args = (const struct btf_param *)(meta->func_proto + 1); in check_kfunc_args()
11501 nargs = btf_type_vlen(meta->func_proto); in check_kfunc_args()
11505 return -EINVAL; in check_kfunc_args()
11525 if (reg->type != SCALAR_VALUE) { in check_kfunc_args()
11527 return -EINVAL; in check_kfunc_args()
11530 if (is_kfunc_arg_constant(meta->btf, &args[i])) { in check_kfunc_args()
11531 if (meta->arg_constant.found) { in check_kfunc_args()
11533 return -EFAULT; in check_kfunc_args()
11535 if (!tnum_is_const(reg->var_off)) { in check_kfunc_args()
11537 return -EINVAL; in check_kfunc_args()
11542 meta->arg_constant.found = true; in check_kfunc_args()
11543 meta->arg_constant.value = reg->var_off.value; in check_kfunc_args()
11545 meta->r0_rdonly = true; in check_kfunc_args()
11552 if (meta->r0_size) { in check_kfunc_args()
11554 return -EINVAL; in check_kfunc_args()
11557 if (!tnum_is_const(reg->var_off)) { in check_kfunc_args()
11559 return -EINVAL; in check_kfunc_args()
11562 meta->r0_size = reg->var_off.value; in check_kfunc_args()
11572 return -EINVAL; in check_kfunc_args()
11576 (register_is_null(reg) || type_may_be_null(reg->type)) && in check_kfunc_args()
11577 !is_kfunc_arg_nullable(meta->btf, &args[i])) { in check_kfunc_args()
11579 return -EACCES; in check_kfunc_args()
11582 if (reg->ref_obj_id) { in check_kfunc_args()
11583 if (is_kfunc_release(meta) && meta->ref_obj_id) { in check_kfunc_args()
11585 regno, reg->ref_obj_id, in check_kfunc_args()
11586 meta->ref_obj_id); in check_kfunc_args()
11587 return -EFAULT; in check_kfunc_args()
11589 meta->ref_obj_id = reg->ref_obj_id; in check_kfunc_args()
11591 meta->release_regno = regno; in check_kfunc_args()
11594 ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id); in check_kfunc_args()
11595 ref_tname = btf_name_by_offset(btf, ref_t->name_off); in check_kfunc_args()
11612 return -EINVAL; in check_kfunc_args()
11616 return -EINVAL; in check_kfunc_args()
11640 return -EFAULT; in check_kfunc_args()
11643 if (is_kfunc_release(meta) && reg->ref_obj_id) in check_kfunc_args()
11651 if (reg->type != PTR_TO_CTX) { in check_kfunc_args()
11653 return -EINVAL; in check_kfunc_args()
11656 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) { in check_kfunc_args()
11657 ret = get_kern_ctx_btf_id(&env->log, resolve_prog_type(env->prog)); in check_kfunc_args()
11659 return -EINVAL; in check_kfunc_args()
11660 meta->ret_btf_id = ret; in check_kfunc_args()
11664 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
11665 if (meta->func_id != special_kfunc_list[KF_bpf_obj_drop_impl]) { in check_kfunc_args()
11667 return -EINVAL; in check_kfunc_args()
11669 } else if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC | MEM_PERCPU)) { in check_kfunc_args()
11670 if (meta->func_id != special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) { in check_kfunc_args()
11672 return -EINVAL; in check_kfunc_args()
11676 return -EINVAL; in check_kfunc_args()
11678 if (!reg->ref_obj_id) { in check_kfunc_args()
11680 return -EINVAL; in check_kfunc_args()
11682 if (meta->btf == btf_vmlinux) { in check_kfunc_args()
11683 meta->arg_btf = reg->btf; in check_kfunc_args()
11684 meta->arg_btf_id = reg->btf_id; in check_kfunc_args()
11692 if (reg->type != PTR_TO_STACK && in check_kfunc_args()
11693 reg->type != CONST_PTR_TO_DYNPTR) { in check_kfunc_args()
11695 return -EINVAL; in check_kfunc_args()
11698 if (reg->type == CONST_PTR_TO_DYNPTR) in check_kfunc_args()
11704 if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) { in check_kfunc_args()
11706 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp]) { in check_kfunc_args()
11708 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_clone] && in check_kfunc_args()
11710 enum bpf_dynptr_type parent_type = meta->initialized_dynptr.type; in check_kfunc_args()
11714 return -EFAULT; in check_kfunc_args()
11718 clone_ref_obj_id = meta->initialized_dynptr.ref_obj_id; in check_kfunc_args()
11721 return -EFAULT; in check_kfunc_args()
11736 meta->initialized_dynptr.id = id; in check_kfunc_args()
11737 meta->initialized_dynptr.type = dynptr_get_type(env, reg); in check_kfunc_args()
11738 meta->initialized_dynptr.ref_obj_id = dynptr_ref_obj_id(env, reg); in check_kfunc_args()
11744 if (meta->func_id == special_kfunc_list[KF_bpf_iter_css_task_new]) { in check_kfunc_args()
11747 return -EINVAL; in check_kfunc_args()
11755 if (reg->type != PTR_TO_MAP_VALUE && in check_kfunc_args()
11756 reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
11758 return -EINVAL; in check_kfunc_args()
11760 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) { in check_kfunc_args()
11762 return -EINVAL; in check_kfunc_args()
11769 if (reg->type != PTR_TO_MAP_VALUE && in check_kfunc_args()
11770 reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
11772 return -EINVAL; in check_kfunc_args()
11774 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) { in check_kfunc_args()
11776 return -EINVAL; in check_kfunc_args()
11783 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
11785 return -EINVAL; in check_kfunc_args()
11787 if (!reg->ref_obj_id) { in check_kfunc_args()
11789 return -EINVAL; in check_kfunc_args()
11796 if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove]) { in check_kfunc_args()
11797 if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) { in check_kfunc_args()
11798 verbose(env, "rbtree_remove node input must be non-owning ref\n"); in check_kfunc_args()
11799 return -EINVAL; in check_kfunc_args()
11803 return -EINVAL; in check_kfunc_args()
11806 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
11808 return -EINVAL; in check_kfunc_args()
11810 if (!reg->ref_obj_id) { in check_kfunc_args()
11812 return -EINVAL; in check_kfunc_args()
11822 if ((base_type(reg->type) != PTR_TO_BTF_ID || in check_kfunc_args()
11823 (bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) && in check_kfunc_args()
11824 !reg2btf_ids[base_type(reg->type)]) { in check_kfunc_args()
11825 verbose(env, "arg#%d is %s ", i, reg_type_str(env, reg->type)); in check_kfunc_args()
11827 reg_type_str(env, base_type(reg->type) | in check_kfunc_args()
11828 (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS))); in check_kfunc_args()
11829 return -EINVAL; in check_kfunc_args()
11840 return -EINVAL; in check_kfunc_args()
11853 if (!register_is_null(buff_reg) || !is_kfunc_arg_optional(meta->btf, buff_arg)) { in check_kfunc_args()
11861 if (is_kfunc_arg_const_mem_size(meta->btf, size_arg, size_reg)) { in check_kfunc_args()
11862 if (meta->arg_constant.found) { in check_kfunc_args()
11864 return -EFAULT; in check_kfunc_args()
11866 if (!tnum_is_const(size_reg->var_off)) { in check_kfunc_args()
11868 return -EINVAL; in check_kfunc_args()
11870 meta->arg_constant.found = true; in check_kfunc_args()
11871 meta->arg_constant.value = size_reg->var_off.value; in check_kfunc_args()
11879 if (reg->type != PTR_TO_FUNC) { in check_kfunc_args()
11881 return -EINVAL; in check_kfunc_args()
11883 meta->subprogno = reg->subprogno; in check_kfunc_args()
11886 if (!type_is_ptr_alloc_obj(reg->type)) { in check_kfunc_args()
11887 verbose(env, "arg#%d is neither owning or non-owning ref\n", i); in check_kfunc_args()
11888 return -EINVAL; in check_kfunc_args()
11890 if (!type_is_non_owning_ref(reg->type)) in check_kfunc_args()
11891 meta->arg_owning_ref = true; in check_kfunc_args()
11896 return -EFAULT; in check_kfunc_args()
11899 if (rec->refcount_off < 0) { in check_kfunc_args()
11901 return -EINVAL; in check_kfunc_args()
11904 meta->arg_btf = reg->btf; in check_kfunc_args()
11905 meta->arg_btf_id = reg->btf_id; in check_kfunc_args()
11908 if (reg->type != PTR_TO_MAP_VALUE) { in check_kfunc_args()
11910 return -EINVAL; in check_kfunc_args()
11919 if (is_kfunc_release(meta) && !meta->release_regno) { in check_kfunc_args()
11922 return -EINVAL; in check_kfunc_args()
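/* Illustrative pairing (hedged sketch, not part of verifier.c): an acquire
 * kfunc such as bpf_obj_new() returns a referenced PTR_TO_BTF_ID | MEM_ALLOC
 * register with a non-zero ref_obj_id, and the matching release kfunc
 * bpf_obj_drop() must later receive that same referenced pointer. The checks
 * above reject passing an unreferenced or non-owning pointer to a release
 * kfunc, and the reference-state tracking elsewhere reports the reference as
 * leaked if the drop never happens.
 */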
11941 if (!insn->imm) in fetch_kfunc_meta()
11942 return -EINVAL; in fetch_kfunc_meta()
11944 desc_btf = find_kfunc_desc_btf(env, insn->off); in fetch_kfunc_meta()
11948 func_id = insn->imm; in fetch_kfunc_meta()
11950 func_name = btf_name_by_offset(desc_btf, func->name_off); in fetch_kfunc_meta()
11953 func_proto = btf_type_by_id(desc_btf, func->type); in fetch_kfunc_meta()
11955 kfunc_flags = btf_kfunc_id_set_contains(desc_btf, func_id, env->prog); in fetch_kfunc_meta()
11957 return -EACCES; in fetch_kfunc_meta()
11961 meta->btf = desc_btf; in fetch_kfunc_meta()
11962 meta->func_id = func_id; in fetch_kfunc_meta()
11963 meta->kfunc_flags = *kfunc_flags; in fetch_kfunc_meta()
11964 meta->func_proto = func_proto; in fetch_kfunc_meta()
11965 meta->func_name = func_name; in fetch_kfunc_meta()
11988 if (!insn->imm) in check_kfunc_call()
11992 if (err == -EACCES && func_name) in check_kfunc_call()
11997 insn_aux = &env->insn_aux_data[insn_idx]; in check_kfunc_call()
11999 insn_aux->is_iter_next = is_iter_next_kfunc(&meta); in check_kfunc_call()
12003 return -EACCES; in check_kfunc_call()
12007 if (sleepable && !env->prog->aux->sleepable) { in check_kfunc_call()
12009 return -EACCES; in check_kfunc_call()
12030 if (env->cur_state->active_rcu_lock) { in check_kfunc_call()
12037 return -EACCES; in check_kfunc_call()
12042 return -EINVAL; in check_kfunc_call()
12044 bpf_for_each_reg_in_vstate_mask(env->cur_state, state, reg, clear_mask, ({ in check_kfunc_call()
12045 if (reg->type & MEM_RCU) { in check_kfunc_call()
12046 reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL); in check_kfunc_call()
12047 reg->type |= PTR_UNTRUSTED; in check_kfunc_call()
12050 env->cur_state->active_rcu_lock = false; in check_kfunc_call()
12053 return -EACCES; in check_kfunc_call()
12056 env->cur_state->active_rcu_lock = true; in check_kfunc_call()
12059 return -EINVAL; in check_kfunc_call()
12078 insn_aux->insert_off = regs[BPF_REG_2].off; in check_kfunc_call()
12079 insn_aux->kptr_struct_meta = btf_find_struct_meta(meta.arg_btf, meta.arg_btf_id); in check_kfunc_call()
12082 verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n", in check_kfunc_call()
12099 return -ENOTSUPP; in check_kfunc_call()
12101 env->seen_exception = true; in check_kfunc_call()
12106 if (!env->exception_callback_subprog) { in check_kfunc_call()
12117 t = btf_type_skip_modifiers(desc_btf, meta.func_proto->type, NULL); in check_kfunc_call()
12126 return -EINVAL; in check_kfunc_call()
12132 mark_btf_func_reg_size(env, BPF_REG_0, t->size); in check_kfunc_call()
12134 ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id); in check_kfunc_call()
12144 return -ENOMEM; in check_kfunc_call()
12148 return -EINVAL; in check_kfunc_call()
12151 ret_btf = env->prog->aux->btf; in check_kfunc_call()
12157 return -EINVAL; in check_kfunc_call()
12163 return -EINVAL; in check_kfunc_call()
12167 if (ret_t->size > BPF_GLOBAL_PERCPU_MA_MAX_SIZE) { in check_kfunc_call()
12169 ret_t->size, BPF_GLOBAL_PERCPU_MA_MAX_SIZE); in check_kfunc_call()
12170 return -EINVAL; in check_kfunc_call()
12189 err = bpf_mem_alloc_percpu_unit_init(&bpf_global_percpu_ma, ret_t->size); in check_kfunc_call()
12199 return -EINVAL; in check_kfunc_call()
12204 return -EINVAL; in check_kfunc_call()
12215 insn_aux->obj_new_size = ret_t->size; in check_kfunc_call()
12216 insn_aux->kptr_struct_meta = struct_meta; in check_kfunc_call()
12223 insn_aux->kptr_struct_meta = in check_kfunc_call()
12230 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); in check_kfunc_call()
12235 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); in check_kfunc_call()
12246 return -EINVAL; in check_kfunc_call()
12261 return -EFAULT; in check_kfunc_call()
12272 /* this will set env->seen_direct_write to true */ in check_kfunc_call()
12275 return -EINVAL; in check_kfunc_call()
12281 return -EFAULT; in check_kfunc_call()
12292 return -EFAULT; in check_kfunc_call()
12305 ptr_type->name_off); in check_kfunc_call()
12311 return -EINVAL; in check_kfunc_call()
12334 regs[BPF_REG_0].id = ++env->id_gen; in check_kfunc_call()
12350 regs[BPF_REG_0].id = ++env->id_gen; in check_kfunc_call()
12355 insn_aux->kptr_struct_meta = in check_kfunc_call()
12372 mark_btf_func_reg_size(env, regno, t->size); in check_kfunc_call()
12386 /* Do the add in u64, where overflow is well-defined */ in signed_add_overflows()
12396 /* Do the add in u32, where overflow is well-defined */ in signed_add32_overflows()
12406 /* Do the sub in u64, where overflow is well-defined */ in signed_sub_overflows()
12407 s64 res = (s64)((u64)a - (u64)b); in signed_sub_overflows()
12416 /* Do the sub in u32, where overflow is well-defined */ in signed_sub32_overflows()
12417 s32 res = (s32)((u32)a - (u32)b); in signed_sub32_overflows()
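/* Minimal standalone sketch of the same overflow test (hedged; uses
 * <stdint.h>/<stdbool.h> types instead of kernel ones, not part of
 * verifier.c): the add is performed in an unsigned type, where wraparound is
 * well-defined, and the sign relation of the result then exposes signed
 * overflow.
 */
#include <stdbool.h>
#include <stdint.h>

static bool demo_signed_add_overflows(int64_t a, int64_t b)
{
	int64_t res = (int64_t)((uint64_t)a + (uint64_t)b);

	if (b < 0)
		return res > a;	/* adding a negative value must not grow a */
	return res < a;		/* adding a non-negative value must not shrink a */
}
/* e.g. demo_signed_add_overflows(INT64_MAX, 1) is true,
 *      demo_signed_add_overflows(-1, 1) is false.
 */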
12428 bool known = tnum_is_const(reg->var_off); in check_reg_sane_offset()
12429 s64 val = reg->var_off.value; in check_reg_sane_offset()
12430 s64 smin = reg->smin_value; in check_reg_sane_offset()
12432 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { in check_reg_sane_offset()
12438 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { in check_reg_sane_offset()
12440 reg_type_str(env, type), reg->off); in check_reg_sane_offset()
12450 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { in check_reg_sane_offset()
12460 REASON_BOUNDS = -1,
12461 REASON_TYPE = -2,
12462 REASON_PATHS = -3,
12463 REASON_LIMIT = -4,
12464 REASON_STACK = -5,
12472 switch (ptr_reg->type) { in retrieve_ptr_limit()
12474 /* Offset 0 is out-of-bounds, but acceptable start for the in retrieve_ptr_limit()
12480 ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off); in retrieve_ptr_limit()
12483 max = ptr_reg->map_ptr->value_size; in retrieve_ptr_limit()
12485 ptr_reg->smin_value : in retrieve_ptr_limit()
12486 ptr_reg->umax_value) + ptr_reg->off; in retrieve_ptr_limit()
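/* Worked example (hedged): for a PTR_TO_STACK register pointing at fp-16 with
 * a constant var_off of 0, ptr_limit = -(0 + (-16)) = 16; for
 * PTR_TO_MAP_VALUE the limit is derived from smin/umax plus the fixed offset
 * and rejected if it reaches the map's value_size. The resulting alu_limit is
 * what the later instruction rewrite uses to mask a potentially out-of-bounds
 * offset on speculative (Spectre v1) paths.
 */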
12501 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K; in can_skip_alu_sanitation()
12510 if (aux->alu_state && in update_alu_sanitation_state()
12511 (aux->alu_state != alu_state || in update_alu_sanitation_state()
12512 aux->alu_limit != alu_limit)) in update_alu_sanitation_state()
12516 aux->alu_state = alu_state; in update_alu_sanitation_state()
12517 aux->alu_limit = alu_limit; in update_alu_sanitation_state()
12552 regs = branch->frame[branch->curframe]->regs; in sanitize_speculative_path()
12553 if (BPF_SRC(insn->code) == BPF_K) { in sanitize_speculative_path()
12554 mark_reg_unknown(env, regs, insn->dst_reg); in sanitize_speculative_path()
12555 } else if (BPF_SRC(insn->code) == BPF_X) { in sanitize_speculative_path()
12556 mark_reg_unknown(env, regs, insn->dst_reg); in sanitize_speculative_path()
12557 mark_reg_unknown(env, regs, insn->src_reg); in sanitize_speculative_path()
12571 struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux; in sanitize_ptr_alu()
12572 struct bpf_verifier_state *vstate = env->cur_state; in sanitize_ptr_alu()
12573 bool off_is_imm = tnum_is_const(off_reg->var_off); in sanitize_ptr_alu()
12574 bool off_is_neg = off_reg->smin_value < 0; in sanitize_ptr_alu()
12576 u8 opcode = BPF_OP(insn->code); in sanitize_ptr_alu()
12585 /* We already marked aux for masking from non-speculative in sanitize_ptr_alu()
12589 if (vstate->speculative) in sanitize_ptr_alu()
12593 if (!tnum_is_const(off_reg->var_off) && in sanitize_ptr_alu()
12594 (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) in sanitize_ptr_alu()
12597 info->mask_to_left = (opcode == BPF_ADD && off_is_neg) || in sanitize_ptr_alu()
12601 err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left); in sanitize_ptr_alu()
12609 alu_state = info->aux.alu_state; in sanitize_ptr_alu()
12610 alu_limit = abs(info->aux.alu_limit - alu_limit); in sanitize_ptr_alu()
12621 env->explore_alu_limits = true; in sanitize_ptr_alu()
12632 * Also, when register is a known constant, we rewrite register-based in sanitize_ptr_alu()
12633 * operation to immediate-based, and thus do not need masking (and as in sanitize_ptr_alu()
12634 * a consequence, do not need to simulate the zero-truncation either). in sanitize_ptr_alu()
12639 /* Simulate and find potential out-of-bounds access under in sanitize_ptr_alu()
12643 * to simulate dst (== 0) +/-= ptr. Needed, for example, in sanitize_ptr_alu()
12644 * for cases where we use K-based arithmetic in one direction in sanitize_ptr_alu()
12645 * and truncated reg-based in the other in order to explore in sanitize_ptr_alu()
12652 ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, in sanitize_ptr_alu()
12653 env->insn_idx); in sanitize_ptr_alu()
12661 struct bpf_verifier_state *vstate = env->cur_state; in sanitize_mark_insn_seen()
12665 * the non-speculative domain, sanitize_dead_code() can still in sanitize_mark_insn_seen()
12668 if (!vstate->speculative) in sanitize_mark_insn_seen()
12669 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; in sanitize_mark_insn_seen()
12678 const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub"; in sanitize_err()
12679 u32 dst = insn->dst_reg, src = insn->src_reg; in sanitize_err()
12708 return -EACCES; in sanitize_err()
12719 * 'off' includes 'reg->off'.
12727 if (!tnum_is_const(reg->var_off)) { in check_stack_access_for_ptr_arithmetic()
12730 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_access_for_ptr_arithmetic()
12733 return -EACCES; in check_stack_access_for_ptr_arithmetic()
12736 if (off >= 0 || off < -MAX_BPF_STACK) { in check_stack_access_for_ptr_arithmetic()
12739 return -EACCES; in check_stack_access_for_ptr_arithmetic()
12749 u32 dst = insn->dst_reg; in sanitize_check_bounds()
12754 if (env->bypass_spec_v1) in sanitize_check_bounds()
12757 switch (dst_reg->type) { in sanitize_check_bounds()
12760 dst_reg->off + dst_reg->var_off.value)) in sanitize_check_bounds()
12761 return -EACCES; in sanitize_check_bounds()
12764 if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) { in sanitize_check_bounds()
12767 return -EACCES; in sanitize_check_bounds()
12779 * If we return -EACCES, caller may want to try again treating pointer as a
12780 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
12787 struct bpf_verifier_state *vstate = env->cur_state; in adjust_ptr_min_max_vals()
12788 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in adjust_ptr_min_max_vals()
12789 struct bpf_reg_state *regs = state->regs, *dst_reg; in adjust_ptr_min_max_vals()
12790 bool known = tnum_is_const(off_reg->var_off); in adjust_ptr_min_max_vals()
12791 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, in adjust_ptr_min_max_vals()
12792 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; in adjust_ptr_min_max_vals()
12793 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, in adjust_ptr_min_max_vals()
12794 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; in adjust_ptr_min_max_vals()
12796 u8 opcode = BPF_OP(insn->code); in adjust_ptr_min_max_vals()
12797 u32 dst = insn->dst_reg; in adjust_ptr_min_max_vals()
12811 if (BPF_CLASS(insn->code) != BPF_ALU64) { in adjust_ptr_min_max_vals()
12812 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ in adjust_ptr_min_max_vals()
12813 if (opcode == BPF_SUB && env->allow_ptr_leaks) { in adjust_ptr_min_max_vals()
12819 "R%d 32-bit pointer arithmetic prohibited\n", in adjust_ptr_min_max_vals()
12821 return -EACCES; in adjust_ptr_min_max_vals()
12824 if (ptr_reg->type & PTR_MAYBE_NULL) { in adjust_ptr_min_max_vals()
12825 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", in adjust_ptr_min_max_vals()
12826 dst, reg_type_str(env, ptr_reg->type)); in adjust_ptr_min_max_vals()
12827 return -EACCES; in adjust_ptr_min_max_vals()
12830 switch (base_type(ptr_reg->type)) { in adjust_ptr_min_max_vals()
12846 dst, reg_type_str(env, ptr_reg->type)); in adjust_ptr_min_max_vals()
12847 return -EACCES; in adjust_ptr_min_max_vals()
12855 dst_reg->type = ptr_reg->type; in adjust_ptr_min_max_vals()
12856 dst_reg->id = ptr_reg->id; in adjust_ptr_min_max_vals()
12858 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || in adjust_ptr_min_max_vals()
12859 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) in adjust_ptr_min_max_vals()
12860 return -EINVAL; in adjust_ptr_min_max_vals()
12862 /* pointer types do not carry 32-bit bounds at the moment. */ in adjust_ptr_min_max_vals()
12877 if (known && (ptr_reg->off + smin_val == in adjust_ptr_min_max_vals()
12878 (s64)(s32)(ptr_reg->off + smin_val))) { in adjust_ptr_min_max_vals()
12880 dst_reg->smin_value = smin_ptr; in adjust_ptr_min_max_vals()
12881 dst_reg->smax_value = smax_ptr; in adjust_ptr_min_max_vals()
12882 dst_reg->umin_value = umin_ptr; in adjust_ptr_min_max_vals()
12883 dst_reg->umax_value = umax_ptr; in adjust_ptr_min_max_vals()
12884 dst_reg->var_off = ptr_reg->var_off; in adjust_ptr_min_max_vals()
12885 dst_reg->off = ptr_reg->off + smin_val; in adjust_ptr_min_max_vals()
12886 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
12889 /* A new variable offset is created. Note that off_reg->off in adjust_ptr_min_max_vals()
12900 dst_reg->smin_value = S64_MIN; in adjust_ptr_min_max_vals()
12901 dst_reg->smax_value = S64_MAX; in adjust_ptr_min_max_vals()
12903 dst_reg->smin_value = smin_ptr + smin_val; in adjust_ptr_min_max_vals()
12904 dst_reg->smax_value = smax_ptr + smax_val; in adjust_ptr_min_max_vals()
12908 dst_reg->umin_value = 0; in adjust_ptr_min_max_vals()
12909 dst_reg->umax_value = U64_MAX; in adjust_ptr_min_max_vals()
12911 dst_reg->umin_value = umin_ptr + umin_val; in adjust_ptr_min_max_vals()
12912 dst_reg->umax_value = umax_ptr + umax_val; in adjust_ptr_min_max_vals()
12914 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); in adjust_ptr_min_max_vals()
12915 dst_reg->off = ptr_reg->off; in adjust_ptr_min_max_vals()
12916 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
12918 dst_reg->id = ++env->id_gen; in adjust_ptr_min_max_vals()
12920 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); in adjust_ptr_min_max_vals()
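/* Worked example (hedged): a PTR_TO_PACKET register with off=0 and range=14
 * plus an unknown scalar in [0, 16] keeps its fixed offset, absorbs the
 * scalar's bounds into umin/umax and var_off, gets a fresh id, and has its
 * range cleared, so the program must re-compare against data_end before the
 * result may be dereferenced.
 */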
12925 /* scalar -= pointer. Creates an unknown scalar */ in adjust_ptr_min_max_vals()
12928 return -EACCES; in adjust_ptr_min_max_vals()
12934 if (ptr_reg->type == PTR_TO_STACK) { in adjust_ptr_min_max_vals()
12937 return -EACCES; in adjust_ptr_min_max_vals()
12939 if (known && (ptr_reg->off - smin_val == in adjust_ptr_min_max_vals()
12940 (s64)(s32)(ptr_reg->off - smin_val))) { in adjust_ptr_min_max_vals()
12941 /* pointer -= K. Subtract it from fixed offset */ in adjust_ptr_min_max_vals()
12942 dst_reg->smin_value = smin_ptr; in adjust_ptr_min_max_vals()
12943 dst_reg->smax_value = smax_ptr; in adjust_ptr_min_max_vals()
12944 dst_reg->umin_value = umin_ptr; in adjust_ptr_min_max_vals()
12945 dst_reg->umax_value = umax_ptr; in adjust_ptr_min_max_vals()
12946 dst_reg->var_off = ptr_reg->var_off; in adjust_ptr_min_max_vals()
12947 dst_reg->id = ptr_reg->id; in adjust_ptr_min_max_vals()
12948 dst_reg->off = ptr_reg->off - smin_val; in adjust_ptr_min_max_vals()
12949 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
12953 * nonnegative, then any reg->range we had before is still good. in adjust_ptr_min_max_vals()
12958 dst_reg->smin_value = S64_MIN; in adjust_ptr_min_max_vals()
12959 dst_reg->smax_value = S64_MAX; in adjust_ptr_min_max_vals()
12961 dst_reg->smin_value = smin_ptr - smax_val; in adjust_ptr_min_max_vals()
12962 dst_reg->smax_value = smax_ptr - smin_val; in adjust_ptr_min_max_vals()
12966 dst_reg->umin_value = 0; in adjust_ptr_min_max_vals()
12967 dst_reg->umax_value = U64_MAX; in adjust_ptr_min_max_vals()
12970 dst_reg->umin_value = umin_ptr - umax_val; in adjust_ptr_min_max_vals()
12971 dst_reg->umax_value = umax_ptr - umin_val; in adjust_ptr_min_max_vals()
12973 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); in adjust_ptr_min_max_vals()
12974 dst_reg->off = ptr_reg->off; in adjust_ptr_min_max_vals()
12975 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
12977 dst_reg->id = ++env->id_gen; in adjust_ptr_min_max_vals()
12980 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); in adjust_ptr_min_max_vals()
12989 return -EACCES; in adjust_ptr_min_max_vals()
12991 /* other operators (e.g. MUL,LSH) produce non-pointer results */ in adjust_ptr_min_max_vals()
12994 return -EACCES; in adjust_ptr_min_max_vals()
12997 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) in adjust_ptr_min_max_vals()
12998 return -EINVAL; in adjust_ptr_min_max_vals()
13001 return -EACCES; in adjust_ptr_min_max_vals()
13015 s32 smin_val = src_reg->s32_min_value; in scalar32_min_max_add()
13016 s32 smax_val = src_reg->s32_max_value; in scalar32_min_max_add()
13017 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_add()
13018 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_add()
13020 if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) || in scalar32_min_max_add()
13021 signed_add32_overflows(dst_reg->s32_max_value, smax_val)) { in scalar32_min_max_add()
13022 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_add()
13023 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_add()
13025 dst_reg->s32_min_value += smin_val; in scalar32_min_max_add()
13026 dst_reg->s32_max_value += smax_val; in scalar32_min_max_add()
13028 if (dst_reg->u32_min_value + umin_val < umin_val || in scalar32_min_max_add()
13029 dst_reg->u32_max_value + umax_val < umax_val) { in scalar32_min_max_add()
13030 dst_reg->u32_min_value = 0; in scalar32_min_max_add()
13031 dst_reg->u32_max_value = U32_MAX; in scalar32_min_max_add()
13033 dst_reg->u32_min_value += umin_val; in scalar32_min_max_add()
13034 dst_reg->u32_max_value += umax_val; in scalar32_min_max_add()
13041 s64 smin_val = src_reg->smin_value; in scalar_min_max_add()
13042 s64 smax_val = src_reg->smax_value; in scalar_min_max_add()
13043 u64 umin_val = src_reg->umin_value; in scalar_min_max_add()
13044 u64 umax_val = src_reg->umax_value; in scalar_min_max_add()
13046 if (signed_add_overflows(dst_reg->smin_value, smin_val) || in scalar_min_max_add()
13047 signed_add_overflows(dst_reg->smax_value, smax_val)) { in scalar_min_max_add()
13048 dst_reg->smin_value = S64_MIN; in scalar_min_max_add()
13049 dst_reg->smax_value = S64_MAX; in scalar_min_max_add()
13051 dst_reg->smin_value += smin_val; in scalar_min_max_add()
13052 dst_reg->smax_value += smax_val; in scalar_min_max_add()
13054 if (dst_reg->umin_value + umin_val < umin_val || in scalar_min_max_add()
13055 dst_reg->umax_value + umax_val < umax_val) { in scalar_min_max_add()
13056 dst_reg->umin_value = 0; in scalar_min_max_add()
13057 dst_reg->umax_value = U64_MAX; in scalar_min_max_add()
13059 dst_reg->umin_value += umin_val; in scalar_min_max_add()
13060 dst_reg->umax_value += umax_val; in scalar_min_max_add()
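/* Worked example (hedged): dst in [10, 20] plus src in [1, 5] becomes
 * [11, 25] in both the signed and unsigned domains; whenever one of the
 * additions could overflow, the affected bounds are reset to the full range
 * rather than tracking a wrapped interval.
 */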
13067 s32 smin_val = src_reg->s32_min_value; in scalar32_min_max_sub()
13068 s32 smax_val = src_reg->s32_max_value; in scalar32_min_max_sub()
13069 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_sub()
13070 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_sub()
13072 if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) || in scalar32_min_max_sub()
13073 signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) { in scalar32_min_max_sub()
13075 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_sub()
13076 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_sub()
13078 dst_reg->s32_min_value -= smax_val; in scalar32_min_max_sub()
13079 dst_reg->s32_max_value -= smin_val; in scalar32_min_max_sub()
13081 if (dst_reg->u32_min_value < umax_val) { in scalar32_min_max_sub()
13083 dst_reg->u32_min_value = 0; in scalar32_min_max_sub()
13084 dst_reg->u32_max_value = U32_MAX; in scalar32_min_max_sub()
13087 dst_reg->u32_min_value -= umax_val; in scalar32_min_max_sub()
13088 dst_reg->u32_max_value -= umin_val; in scalar32_min_max_sub()
13095 s64 smin_val = src_reg->smin_value; in scalar_min_max_sub()
13096 s64 smax_val = src_reg->smax_value; in scalar_min_max_sub()
13097 u64 umin_val = src_reg->umin_value; in scalar_min_max_sub()
13098 u64 umax_val = src_reg->umax_value; in scalar_min_max_sub()
13100 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || in scalar_min_max_sub()
13101 signed_sub_overflows(dst_reg->smax_value, smin_val)) { in scalar_min_max_sub()
13103 dst_reg->smin_value = S64_MIN; in scalar_min_max_sub()
13104 dst_reg->smax_value = S64_MAX; in scalar_min_max_sub()
13106 dst_reg->smin_value -= smax_val; in scalar_min_max_sub()
13107 dst_reg->smax_value -= smin_val; in scalar_min_max_sub()
13109 if (dst_reg->umin_value < umax_val) { in scalar_min_max_sub()
13111 dst_reg->umin_value = 0; in scalar_min_max_sub()
13112 dst_reg->umax_value = U64_MAX; in scalar_min_max_sub()
13115 dst_reg->umin_value -= umax_val; in scalar_min_max_sub()
13116 dst_reg->umax_value -= umin_val; in scalar_min_max_sub()
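/* Worked example (hedged): subtraction uses the cross terms, so dst in
 * [10, 20] minus src in [1, 5] becomes [10 - 5, 20 - 1] = [5, 19]; unsigned
 * bounds collapse to [0, U64_MAX] when dst->umin_value < src->umax_value,
 * since the true result might wrap below zero.
 */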
13123 s32 smin_val = src_reg->s32_min_value; in scalar32_min_max_mul()
13124 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_mul()
13125 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_mul()
13127 if (smin_val < 0 || dst_reg->s32_min_value < 0) { in scalar32_min_max_mul()
13135 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) { in scalar32_min_max_mul()
13140 dst_reg->u32_min_value *= umin_val; in scalar32_min_max_mul()
13141 dst_reg->u32_max_value *= umax_val; in scalar32_min_max_mul()
13142 if (dst_reg->u32_max_value > S32_MAX) { in scalar32_min_max_mul()
13144 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_mul()
13145 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_mul()
13147 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_mul()
13148 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_mul()
13155 s64 smin_val = src_reg->smin_value; in scalar_min_max_mul()
13156 u64 umin_val = src_reg->umin_value; in scalar_min_max_mul()
13157 u64 umax_val = src_reg->umax_value; in scalar_min_max_mul()
13159 if (smin_val < 0 || dst_reg->smin_value < 0) { in scalar_min_max_mul()
13167 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { in scalar_min_max_mul()
13172 dst_reg->umin_value *= umin_val; in scalar_min_max_mul()
13173 dst_reg->umax_value *= umax_val; in scalar_min_max_mul()
13174 if (dst_reg->umax_value > S64_MAX) { in scalar_min_max_mul()
13176 dst_reg->smin_value = S64_MIN; in scalar_min_max_mul()
13177 dst_reg->smax_value = S64_MAX; in scalar_min_max_mul()
13179 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_mul()
13180 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_mul()
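/* Worked example (hedged): dst in [2, 3] times src in [10, 100] becomes
 * [20, 300]; multiplication is only tracked when both operands are known
 * non-negative and small enough that the product cannot overflow (an
 * umax above U32_MAX in the 64-bit case punts to unknown bounds).
 */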
13187 bool src_known = tnum_subreg_is_const(src_reg->var_off); in scalar32_min_max_and()
13188 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); in scalar32_min_max_and()
13189 struct tnum var32_off = tnum_subreg(dst_reg->var_off); in scalar32_min_max_and()
13190 s32 smin_val = src_reg->s32_min_value; in scalar32_min_max_and()
13191 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_and()
13201 dst_reg->u32_min_value = var32_off.value; in scalar32_min_max_and()
13202 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val); in scalar32_min_max_and()
13203 if (dst_reg->s32_min_value < 0 || smin_val < 0) { in scalar32_min_max_and()
13207 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_and()
13208 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_and()
13213 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_and()
13214 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_and()
13221 bool src_known = tnum_is_const(src_reg->var_off); in scalar_min_max_and()
13222 bool dst_known = tnum_is_const(dst_reg->var_off); in scalar_min_max_and()
13223 s64 smin_val = src_reg->smin_value; in scalar_min_max_and()
13224 u64 umax_val = src_reg->umax_value; in scalar_min_max_and()
13227 __mark_reg_known(dst_reg, dst_reg->var_off.value); in scalar_min_max_and()
13234 dst_reg->umin_value = dst_reg->var_off.value; in scalar_min_max_and()
13235 dst_reg->umax_value = min(dst_reg->umax_value, umax_val); in scalar_min_max_and()
13236 if (dst_reg->smin_value < 0 || smin_val < 0) { in scalar_min_max_and()
13240 dst_reg->smin_value = S64_MIN; in scalar_min_max_and()
13241 dst_reg->smax_value = S64_MAX; in scalar_min_max_and()
13246 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_and()
13247 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_and()
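/* Worked example (hedged): ANDing an unknown dst with a constant 0xff src
 * leaves var_off with all upper bits known zero, so umin becomes the
 * known-one bits (0 here) and umax is clamped to at most 0xff; signed bounds
 * are copied from the unsigned ones only when both inputs are known
 * non-negative, otherwise they are reset.
 */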
13256 bool src_known = tnum_subreg_is_const(src_reg->var_off); in scalar32_min_max_or()
13257 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); in scalar32_min_max_or()
13258 struct tnum var32_off = tnum_subreg(dst_reg->var_off); in scalar32_min_max_or()
13259 s32 smin_val = src_reg->s32_min_value; in scalar32_min_max_or()
13260 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_or()
13270 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val); in scalar32_min_max_or()
13271 dst_reg->u32_max_value = var32_off.value | var32_off.mask; in scalar32_min_max_or()
13272 if (dst_reg->s32_min_value < 0 || smin_val < 0) { in scalar32_min_max_or()
13276 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_or()
13277 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_or()
13282 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_or()
13283 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_or()
13290 bool src_known = tnum_is_const(src_reg->var_off); in scalar_min_max_or()
13291 bool dst_known = tnum_is_const(dst_reg->var_off); in scalar_min_max_or()
13292 s64 smin_val = src_reg->smin_value; in scalar_min_max_or()
13293 u64 umin_val = src_reg->umin_value; in scalar_min_max_or()
13296 __mark_reg_known(dst_reg, dst_reg->var_off.value); in scalar_min_max_or()
13303 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); in scalar_min_max_or()
13304 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; in scalar_min_max_or()
13305 if (dst_reg->smin_value < 0 || smin_val < 0) { in scalar_min_max_or()
13309 dst_reg->smin_value = S64_MIN; in scalar_min_max_or()
13310 dst_reg->smax_value = S64_MAX; in scalar_min_max_or()
13315 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_or()
13316 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_or()
13325 bool src_known = tnum_subreg_is_const(src_reg->var_off); in scalar32_min_max_xor()
13326 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); in scalar32_min_max_xor()
13327 struct tnum var32_off = tnum_subreg(dst_reg->var_off); in scalar32_min_max_xor()
13328 s32 smin_val = src_reg->s32_min_value; in scalar32_min_max_xor()
13336 dst_reg->u32_min_value = var32_off.value; in scalar32_min_max_xor()
13337 dst_reg->u32_max_value = var32_off.value | var32_off.mask; in scalar32_min_max_xor()
13339 if (dst_reg->s32_min_value >= 0 && smin_val >= 0) { in scalar32_min_max_xor()
13343 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_xor()
13344 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_xor()
13346 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_xor()
13347 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_xor()
13354 bool src_known = tnum_is_const(src_reg->var_off); in scalar_min_max_xor()
13355 bool dst_known = tnum_is_const(dst_reg->var_off); in scalar_min_max_xor()
13356 s64 smin_val = src_reg->smin_value; in scalar_min_max_xor()
13359 /* dst_reg->var_off.value has been updated earlier */ in scalar_min_max_xor()
13360 __mark_reg_known(dst_reg, dst_reg->var_off.value); in scalar_min_max_xor()
13365 dst_reg->umin_value = dst_reg->var_off.value; in scalar_min_max_xor()
13366 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; in scalar_min_max_xor()
13368 if (dst_reg->smin_value >= 0 && smin_val >= 0) { in scalar_min_max_xor()
13372 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_xor()
13373 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_xor()
13375 dst_reg->smin_value = S64_MIN; in scalar_min_max_xor()
13376 dst_reg->smax_value = S64_MAX; in scalar_min_max_xor()
13388 dst_reg->s32_min_value = S32_MIN; in __scalar32_min_max_lsh()
13389 dst_reg->s32_max_value = S32_MAX; in __scalar32_min_max_lsh()
13391 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) { in __scalar32_min_max_lsh()
13392 dst_reg->u32_min_value = 0; in __scalar32_min_max_lsh()
13393 dst_reg->u32_max_value = U32_MAX; in __scalar32_min_max_lsh()
13395 dst_reg->u32_min_value <<= umin_val; in __scalar32_min_max_lsh()
13396 dst_reg->u32_max_value <<= umax_val; in __scalar32_min_max_lsh()
13403 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_lsh()
13404 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_lsh()
13406 struct tnum subreg = tnum_subreg(dst_reg->var_off); in scalar32_min_max_lsh()
13409 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val)); in scalar32_min_max_lsh()
13428 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0) in __scalar64_min_max_lsh()
13429 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32; in __scalar64_min_max_lsh()
13431 dst_reg->smax_value = S64_MAX; in __scalar64_min_max_lsh()
13433 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0) in __scalar64_min_max_lsh()
13434 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32; in __scalar64_min_max_lsh()
13436 dst_reg->smin_value = S64_MIN; in __scalar64_min_max_lsh()
13439 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { in __scalar64_min_max_lsh()
13440 dst_reg->umin_value = 0; in __scalar64_min_max_lsh()
13441 dst_reg->umax_value = U64_MAX; in __scalar64_min_max_lsh()
13443 dst_reg->umin_value <<= umin_val; in __scalar64_min_max_lsh()
13444 dst_reg->umax_value <<= umax_val; in __scalar64_min_max_lsh()
13451 u64 umax_val = src_reg->umax_value; in scalar_min_max_lsh()
13452 u64 umin_val = src_reg->umin_value; in scalar_min_max_lsh()
13458 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); in scalar_min_max_lsh()
13466 struct tnum subreg = tnum_subreg(dst_reg->var_off); in scalar32_min_max_rsh()
13467 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_rsh()
13468 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_rsh()
13484 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_rsh()
13485 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_rsh()
13487 dst_reg->var_off = tnum_rshift(subreg, umin_val); in scalar32_min_max_rsh()
13488 dst_reg->u32_min_value >>= umax_val; in scalar32_min_max_rsh()
13489 dst_reg->u32_max_value >>= umin_val; in scalar32_min_max_rsh()
13498 u64 umax_val = src_reg->umax_value; in scalar_min_max_rsh()
13499 u64 umin_val = src_reg->umin_value; in scalar_min_max_rsh()
13515 dst_reg->smin_value = S64_MIN; in scalar_min_max_rsh()
13516 dst_reg->smax_value = S64_MAX; in scalar_min_max_rsh()
13517 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); in scalar_min_max_rsh()
13518 dst_reg->umin_value >>= umax_val; in scalar_min_max_rsh()
13519 dst_reg->umax_value >>= umin_val; in scalar_min_max_rsh()
13532 u64 umin_val = src_reg->u32_min_value; in scalar32_min_max_arsh()
13537 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val); in scalar32_min_max_arsh()
13538 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val); in scalar32_min_max_arsh()
13540 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32); in scalar32_min_max_arsh()
13545 dst_reg->u32_min_value = 0; in scalar32_min_max_arsh()
13546 dst_reg->u32_max_value = U32_MAX; in scalar32_min_max_arsh()
13555 u64 umin_val = src_reg->umin_value; in scalar_min_max_arsh()
13560 dst_reg->smin_value >>= umin_val; in scalar_min_max_arsh()
13561 dst_reg->smax_value >>= umin_val; in scalar_min_max_arsh()
13563 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64); in scalar_min_max_arsh()
13568 dst_reg->umin_value = 0; in scalar_min_max_arsh()
13569 dst_reg->umax_value = U64_MAX; in scalar_min_max_arsh()
13572 * on bits being shifted in from upper 32-bits. Take easy way out in scalar_min_max_arsh()
13579 /* WARNING: This function does calculations on 64-bit values, but the actual
13580 * execution may occur on 32-bit values. Therefore, things like bitshifts
13581 * need extra checks in the 32-bit case.
13589 u8 opcode = BPF_OP(insn->code); in adjust_scalar_min_max_vals()
13595 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; in adjust_scalar_min_max_vals()
13596 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); in adjust_scalar_min_max_vals()
13651 * understand and calculate behavior in both 32-bit and 64-bit alu ops. in adjust_scalar_min_max_vals()
13663 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
13668 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
13671 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
13676 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
13681 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
13686 dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
13695 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
13708 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
13721 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
13730 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
13747 struct bpf_verifier_state *vstate = env->cur_state; in adjust_reg_min_max_vals()
13748 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in adjust_reg_min_max_vals()
13749 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; in adjust_reg_min_max_vals()
13751 u8 opcode = BPF_OP(insn->code); in adjust_reg_min_max_vals()
13754 dst_reg = &regs[insn->dst_reg]; in adjust_reg_min_max_vals()
13756 if (dst_reg->type != SCALAR_VALUE) in adjust_reg_min_max_vals()
13762 dst_reg->id = 0; in adjust_reg_min_max_vals()
13763 if (BPF_SRC(insn->code) == BPF_X) { in adjust_reg_min_max_vals()
13764 src_reg = &regs[insn->src_reg]; in adjust_reg_min_max_vals()
13765 if (src_reg->type != SCALAR_VALUE) { in adjust_reg_min_max_vals()
13766 if (dst_reg->type != SCALAR_VALUE) { in adjust_reg_min_max_vals()
13771 if (opcode == BPF_SUB && env->allow_ptr_leaks) { in adjust_reg_min_max_vals()
13772 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_reg_min_max_vals()
13776 insn->dst_reg, in adjust_reg_min_max_vals()
13778 return -EACCES; in adjust_reg_min_max_vals()
13784 err = mark_chain_precision(env, insn->dst_reg); in adjust_reg_min_max_vals()
13792 err = mark_chain_precision(env, insn->src_reg); in adjust_reg_min_max_vals()
13797 } else if (dst_reg->precise) { in adjust_reg_min_max_vals()
13799 err = mark_chain_precision(env, insn->src_reg); in adjust_reg_min_max_vals()
13808 __mark_reg_known(&off_reg, insn->imm); in adjust_reg_min_max_vals()
13819 return -EINVAL; in adjust_reg_min_max_vals()
13824 return -EINVAL; in adjust_reg_min_max_vals()
13829 /* check validity of 32-bit and 64-bit arithmetic operations */
13833 u8 opcode = BPF_OP(insn->code); in check_alu_op()
13838 if (BPF_SRC(insn->code) != BPF_K || in check_alu_op()
13839 insn->src_reg != BPF_REG_0 || in check_alu_op()
13840 insn->off != 0 || insn->imm != 0) { in check_alu_op()
13842 return -EINVAL; in check_alu_op()
13845 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || in check_alu_op()
13846 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || in check_alu_op()
13847 (BPF_CLASS(insn->code) == BPF_ALU64 && in check_alu_op()
13848 BPF_SRC(insn->code) != BPF_TO_LE)) { in check_alu_op()
13850 return -EINVAL; in check_alu_op()
13855 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_alu_op()
13859 if (is_pointer_value(env, insn->dst_reg)) { in check_alu_op()
13861 insn->dst_reg); in check_alu_op()
13862 return -EACCES; in check_alu_op()
13866 err = check_reg_arg(env, insn->dst_reg, DST_OP); in check_alu_op()
13872 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
13873 if (insn->imm != 0) { in check_alu_op()
13875 return -EINVAL; in check_alu_op()
13878 if (BPF_CLASS(insn->code) == BPF_ALU) { in check_alu_op()
13879 if (insn->off != 0 && insn->off != 8 && insn->off != 16) { in check_alu_op()
13881 return -EINVAL; in check_alu_op()
13884 if (insn->off != 0 && insn->off != 8 && insn->off != 16 && in check_alu_op()
13885 insn->off != 32) { in check_alu_op()
13887 return -EINVAL; in check_alu_op()
13892 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_alu_op()
13896 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { in check_alu_op()
13898 return -EINVAL; in check_alu_op()
13903 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_alu_op()
13907 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
13908 struct bpf_reg_state *src_reg = regs + insn->src_reg; in check_alu_op()
13909 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; in check_alu_op()
13910 bool need_id = src_reg->type == SCALAR_VALUE && !src_reg->id && in check_alu_op()
13911 !tnum_is_const(src_reg->var_off); in check_alu_op()
13913 if (BPF_CLASS(insn->code) == BPF_ALU64) { in check_alu_op()
13914 if (insn->off == 0) { in check_alu_op()
13923 src_reg->id = ++env->id_gen; in check_alu_op()
13925 dst_reg->live |= REG_LIVE_WRITTEN; in check_alu_op()
13926 dst_reg->subreg_def = DEF_NOT_SUBREG; in check_alu_op()
13929 if (is_pointer_value(env, insn->src_reg)) { in check_alu_op()
13931 "R%d sign-extension part of pointer\n", in check_alu_op()
13932 insn->src_reg); in check_alu_op()
13933 return -EACCES; in check_alu_op()
13934 } else if (src_reg->type == SCALAR_VALUE) { in check_alu_op()
13937 no_sext = src_reg->umax_value < (1ULL << (insn->off - 1)); in check_alu_op()
13939 src_reg->id = ++env->id_gen; in check_alu_op()
13942 dst_reg->id = 0; in check_alu_op()
13943 coerce_reg_to_size_sx(dst_reg, insn->off >> 3); in check_alu_op()
13944 dst_reg->live |= REG_LIVE_WRITTEN; in check_alu_op()
13945 dst_reg->subreg_def = DEF_NOT_SUBREG; in check_alu_op()
13947 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
13952 if (is_pointer_value(env, insn->src_reg)) { in check_alu_op()
13955 insn->src_reg); in check_alu_op()
13956 return -EACCES; in check_alu_op()
13957 } else if (src_reg->type == SCALAR_VALUE) { in check_alu_op()
13958 if (insn->off == 0) { in check_alu_op()
13959 bool is_src_reg_u32 = src_reg->umax_value <= U32_MAX; in check_alu_op()
13962 src_reg->id = ++env->id_gen; in check_alu_op()
13969 dst_reg->id = 0; in check_alu_op()
13970 dst_reg->live |= REG_LIVE_WRITTEN; in check_alu_op()
13971 dst_reg->subreg_def = env->insn_idx + 1; in check_alu_op()
13974 bool no_sext = src_reg->umax_value < (1ULL << (insn->off - 1)); in check_alu_op()
13977 src_reg->id = ++env->id_gen; in check_alu_op()
13980 dst_reg->id = 0; in check_alu_op()
13981 dst_reg->live |= REG_LIVE_WRITTEN; in check_alu_op()
13982 dst_reg->subreg_def = env->insn_idx + 1; in check_alu_op()
13983 coerce_subreg_to_size_sx(dst_reg, insn->off >> 3); in check_alu_op()
13987 insn->dst_reg); in check_alu_op()
13997 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
13998 regs[insn->dst_reg].type = SCALAR_VALUE; in check_alu_op()
13999 if (BPF_CLASS(insn->code) == BPF_ALU64) { in check_alu_op()
14000 __mark_reg_known(regs + insn->dst_reg, in check_alu_op()
14001 insn->imm); in check_alu_op()
14003 __mark_reg_known(regs + insn->dst_reg, in check_alu_op()
14004 (u32)insn->imm); in check_alu_op()
14010 return -EINVAL; in check_alu_op()
14014 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
14015 if (insn->imm != 0 || insn->off > 1 || in check_alu_op()
14016 (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { in check_alu_op()
14018 return -EINVAL; in check_alu_op()
14021 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_alu_op()
14025 if (insn->src_reg != BPF_REG_0 || insn->off > 1 || in check_alu_op()
14026 (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { in check_alu_op()
14028 return -EINVAL; in check_alu_op()
14033 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_alu_op()
14038 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { in check_alu_op()
14040 return -EINVAL; in check_alu_op()
14044 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { in check_alu_op()
14045 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; in check_alu_op()
14047 if (insn->imm < 0 || insn->imm >= size) { in check_alu_op()
14048 verbose(env, "invalid shift %d\n", insn->imm); in check_alu_op()
14049 return -EINVAL; in check_alu_op()
14054 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_alu_op()
14060 return reg_bounds_sanity_check(env, &regs[insn->dst_reg], "alu"); in check_alu_op()
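/* Worked example (hedged): "r1 <<= 64" (BPF_ALU64 | BPF_LSH | BPF_K with
 * imm == 64) is rejected by the shift-range check above with
 * "invalid shift 64": an immediate shift count must lie in [0, size), i.e.
 * [0, 63] for 64-bit and [0, 31] for 32-bit ALU ops.
 */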
14072 if (dst_reg->off < 0 || in find_good_pkt_pointers()
14073 (dst_reg->off == 0 && range_right_open)) in find_good_pkt_pointers()
14077 if (dst_reg->umax_value > MAX_PACKET_OFF || in find_good_pkt_pointers()
14078 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) in find_good_pkt_pointers()
14084 new_range = dst_reg->off; in find_good_pkt_pointers()
14125 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) in find_good_pkt_pointers()
14126 * and [r3, r3 + 8-1) respectively is safe to access depending on in find_good_pkt_pointers()
14133 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. in find_good_pkt_pointers()
14136 if (reg->type == type && reg->id == dst_reg->id) in find_good_pkt_pointers()
14138 reg->range = max(reg->range, new_range); in find_good_pkt_pointers()
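/* Illustrative BPF-side pattern (hedged sketch, e.g. in an XDP program) that
 * this range propagation enables:
 *
 *	void *data     = (void *)(long)ctx->data;
 *	void *data_end = (void *)(long)ctx->data_end;
 *	struct ethhdr *eth = data;
 *
 *	if (data + sizeof(*eth) > data_end)
 *		return XDP_DROP;
 *	// on the fall-through path every packet pointer sharing this id gets
 *	// range >= sizeof(*eth), so the load below is provably in bounds
 *	return eth->h_proto == bpf_htons(ETH_P_IP) ? XDP_PASS : XDP_DROP;
 */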
14148 struct tnum t1 = is_jmp32 ? tnum_subreg(reg1->var_off) : reg1->var_off; in is_scalar_branch_taken()
14149 struct tnum t2 = is_jmp32 ? tnum_subreg(reg2->var_off) : reg2->var_off; in is_scalar_branch_taken()
14150 u64 umin1 = is_jmp32 ? (u64)reg1->u32_min_value : reg1->umin_value; in is_scalar_branch_taken()
14151 u64 umax1 = is_jmp32 ? (u64)reg1->u32_max_value : reg1->umax_value; in is_scalar_branch_taken()
14152 s64 smin1 = is_jmp32 ? (s64)reg1->s32_min_value : reg1->smin_value; in is_scalar_branch_taken()
14153 s64 smax1 = is_jmp32 ? (s64)reg1->s32_max_value : reg1->smax_value; in is_scalar_branch_taken()
14154 u64 umin2 = is_jmp32 ? (u64)reg2->u32_min_value : reg2->umin_value; in is_scalar_branch_taken()
14155 u64 umax2 = is_jmp32 ? (u64)reg2->u32_max_value : reg2->umax_value; in is_scalar_branch_taken()
14156 s64 smin2 = is_jmp32 ? (s64)reg2->s32_min_value : reg2->smin_value; in is_scalar_branch_taken()
14157 s64 smax2 = is_jmp32 ? (s64)reg2->s32_max_value : reg2->smax_value; in is_scalar_branch_taken()
14166 /* non-overlapping ranges */ in is_scalar_branch_taken()
14172 /* if 64-bit ranges are inconclusive, see if we can in is_scalar_branch_taken()
14173 * utilize 32-bit subrange knowledge to eliminate in is_scalar_branch_taken()
14176 if (reg1->u32_min_value > reg2->u32_max_value || in is_scalar_branch_taken()
14177 reg1->u32_max_value < reg2->u32_min_value) in is_scalar_branch_taken()
14179 if (reg1->s32_min_value > reg2->s32_max_value || in is_scalar_branch_taken()
14180 reg1->s32_max_value < reg2->s32_min_value) in is_scalar_branch_taken()
14190 /* non-overlapping ranges */ in is_scalar_branch_taken()
14196 /* if 64-bit ranges are inconclusive, see if we can in is_scalar_branch_taken()
14197 * utilize 32-bit subrange knowledge to eliminate in is_scalar_branch_taken()
14200 if (reg1->u32_min_value > reg2->u32_max_value || in is_scalar_branch_taken()
14201 reg1->u32_max_value < reg2->u32_min_value) in is_scalar_branch_taken()
14203 if (reg1->s32_min_value > reg2->s32_max_value || in is_scalar_branch_taken()
14204 reg1->s32_max_value < reg2->s32_min_value) in is_scalar_branch_taken()
14214 return -1; in is_scalar_branch_taken()
14270 return -1; in is_scalar_branch_taken()
14300 if (src_reg->type == PTR_TO_PACKET_END) { in is_pkt_ptr_branch_taken()
14302 } else if (dst_reg->type == PTR_TO_PACKET_END) { in is_pkt_ptr_branch_taken()
14306 return -1; in is_pkt_ptr_branch_taken()
14309 if (pkt->range >= 0) in is_pkt_ptr_branch_taken()
14310 return -1; in is_pkt_ptr_branch_taken()
14318 if (pkt->range == BEYOND_PKT_END) in is_pkt_ptr_branch_taken()
14327 if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END) in is_pkt_ptr_branch_taken()
14331 return -1; in is_pkt_ptr_branch_taken()
14336 * 1 - branch will be taken and "goto target" will be executed
14337 * 0 - branch will not be taken and fall-through to next insn
14338 * -1 - unknown. Example: "if (reg1 < 5)" is unknown when register value
14357 return -1; in is_branch_taken()
14360 return -1; in is_branch_taken()
14367 return -1; in is_branch_taken()
14375 return -1; in is_branch_taken()
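/* Worked example (hedged): for "if r1 < 5 goto L" with r1 a scalar, this
 * returns 1 when r1 is known to lie in [0, 3] (always taken), 0 when it lies
 * in [10, 20] (never taken), and -1 when it lies in [0, 10], in which case
 * both outcomes are explored and each branch state gets the refined bounds
 * computed below.
 */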
14419 reg1->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value); in regs_refine_cond_op()
14420 reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value); in regs_refine_cond_op()
14421 reg1->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value); in regs_refine_cond_op()
14422 reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value); in regs_refine_cond_op()
14423 reg2->u32_min_value = reg1->u32_min_value; in regs_refine_cond_op()
14424 reg2->u32_max_value = reg1->u32_max_value; in regs_refine_cond_op()
14425 reg2->s32_min_value = reg1->s32_min_value; in regs_refine_cond_op()
14426 reg2->s32_max_value = reg1->s32_max_value; in regs_refine_cond_op()
14428 t = tnum_intersect(tnum_subreg(reg1->var_off), tnum_subreg(reg2->var_off)); in regs_refine_cond_op()
14429 reg1->var_off = tnum_with_subreg(reg1->var_off, t); in regs_refine_cond_op()
14430 reg2->var_off = tnum_with_subreg(reg2->var_off, t); in regs_refine_cond_op()
14432 reg1->umin_value = max(reg1->umin_value, reg2->umin_value); in regs_refine_cond_op()
14433 reg1->umax_value = min(reg1->umax_value, reg2->umax_value); in regs_refine_cond_op()
14434 reg1->smin_value = max(reg1->smin_value, reg2->smin_value); in regs_refine_cond_op()
14435 reg1->smax_value = min(reg1->smax_value, reg2->smax_value); in regs_refine_cond_op()
14436 reg2->umin_value = reg1->umin_value; in regs_refine_cond_op()
14437 reg2->umax_value = reg1->umax_value; in regs_refine_cond_op()
14438 reg2->smin_value = reg1->smin_value; in regs_refine_cond_op()
14439 reg2->smax_value = reg1->smax_value; in regs_refine_cond_op()
14441 reg1->var_off = tnum_intersect(reg1->var_off, reg2->var_off); in regs_refine_cond_op()
14442 reg2->var_off = reg1->var_off; in regs_refine_cond_op()
14452 * is exactly the edge of reg1. in regs_refine_cond_op()
14465 if (reg1->u32_min_value == (u32)val) in regs_refine_cond_op()
14466 reg1->u32_min_value++; in regs_refine_cond_op()
14467 if (reg1->u32_max_value == (u32)val) in regs_refine_cond_op()
14468 reg1->u32_max_value--; in regs_refine_cond_op()
14469 if (reg1->s32_min_value == (s32)val) in regs_refine_cond_op()
14470 reg1->s32_min_value++; in regs_refine_cond_op()
14471 if (reg1->s32_max_value == (s32)val) in regs_refine_cond_op()
14472 reg1->s32_max_value--; in regs_refine_cond_op()
14474 if (reg1->umin_value == (u64)val) in regs_refine_cond_op()
14475 reg1->umin_value++; in regs_refine_cond_op()
14476 if (reg1->umax_value == (u64)val) in regs_refine_cond_op()
14477 reg1->umax_value--; in regs_refine_cond_op()
14478 if (reg1->smin_value == (s64)val) in regs_refine_cond_op()
14479 reg1->smin_value++; in regs_refine_cond_op()
14480 if (reg1->smax_value == (s64)val) in regs_refine_cond_op()
14481 reg1->smax_value--; in regs_refine_cond_op()
14494 * it's a single-bit value to begin with. in regs_refine_cond_op()
14503 t = tnum_or(tnum_subreg(reg1->var_off), tnum_const(val)); in regs_refine_cond_op()
14504 reg1->var_off = tnum_with_subreg(reg1->var_off, t); in regs_refine_cond_op()
14506 reg1->var_off = tnum_or(reg1->var_off, tnum_const(val)); in regs_refine_cond_op()
14516 t = tnum_and(tnum_subreg(reg1->var_off), tnum_const(~val)); in regs_refine_cond_op()
14517 reg1->var_off = tnum_with_subreg(reg1->var_off, t); in regs_refine_cond_op()
14519 reg1->var_off = tnum_and(reg1->var_off, tnum_const(~val)); in regs_refine_cond_op()
14524 reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value); in regs_refine_cond_op()
14525 reg2->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value); in regs_refine_cond_op()
14527 reg1->umax_value = min(reg1->umax_value, reg2->umax_value); in regs_refine_cond_op()
14528 reg2->umin_value = max(reg1->umin_value, reg2->umin_value); in regs_refine_cond_op()
14533 reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value - 1); in regs_refine_cond_op()
14534 reg2->u32_min_value = max(reg1->u32_min_value + 1, reg2->u32_min_value); in regs_refine_cond_op()
14536 reg1->umax_value = min(reg1->umax_value, reg2->umax_value - 1); in regs_refine_cond_op()
14537 reg2->umin_value = max(reg1->umin_value + 1, reg2->umin_value); in regs_refine_cond_op()
14542 reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value); in regs_refine_cond_op()
14543 reg2->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value); in regs_refine_cond_op()
14545 reg1->smax_value = min(reg1->smax_value, reg2->smax_value); in regs_refine_cond_op()
14546 reg2->smin_value = max(reg1->smin_value, reg2->smin_value); in regs_refine_cond_op()
14551 reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value - 1); in regs_refine_cond_op()
14552 reg2->s32_min_value = max(reg1->s32_min_value + 1, reg2->s32_min_value); in regs_refine_cond_op()
14554 reg1->smax_value = min(reg1->smax_value, reg2->smax_value - 1); in regs_refine_cond_op()
14555 reg2->smin_value = max(reg1->smin_value + 1, reg2->smin_value); in regs_refine_cond_op()
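/* Illustrative sketch (added note, not from the upstream source): for an
 * unsigned "r1 < r2" known to be true, with r1 in [0, 100] and r2 in
 * [10, 50], the strict less-than handling above tightens r1->umax_value
 * to min(100, 50 - 1) = 49 and r2->umin_value to max(0 + 1, 10) = 10.
 */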
14573 * check, in which case we have a fake SCALAR_VALUE representing insn->imm).
14590 if (false_reg1->type != SCALAR_VALUE || false_reg2->type != SCALAR_VALUE) in reg_set_min_max()
14614 if (type_may_be_null(reg->type) && reg->id == id && in mark_ptr_or_null_reg()
14615 (is_rcu_reg(reg) || !WARN_ON_ONCE(!reg->id))) { in mark_ptr_or_null_reg()
14617 * known-zero, because we don't allow pointer arithmetic on in mark_ptr_or_null_reg()
14623 * is fine to expect to see reg->off. in mark_ptr_or_null_reg()
14625 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0))) in mark_ptr_or_null_reg()
14627 if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) && in mark_ptr_or_null_reg()
14628 WARN_ON_ONCE(reg->off)) in mark_ptr_or_null_reg()
14632 reg->type = SCALAR_VALUE; in mark_ptr_or_null_reg()
14637 reg->id = 0; in mark_ptr_or_null_reg()
14638 reg->ref_obj_id = 0; in mark_ptr_or_null_reg()
14646 /* For not-NULL ptr, reg->ref_obj_id will be reset in mark_ptr_or_null_reg()
14649 * reg->id is still used by spin_lock ptr. Other in mark_ptr_or_null_reg()
14650 * than spin_lock ptr type, reg->id can be reset. in mark_ptr_or_null_reg()
14652 reg->id = 0; in mark_ptr_or_null_reg()
14663 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in mark_ptr_or_null_regs()
14664 struct bpf_reg_state *regs = state->regs, *reg; in mark_ptr_or_null_regs()
14686 if (BPF_SRC(insn->code) != BPF_X) in try_match_pkt_pointers()
14689 /* Pointers are always 64-bit. */ in try_match_pkt_pointers()
14690 if (BPF_CLASS(insn->code) == BPF_JMP32) in try_match_pkt_pointers()
14693 switch (BPF_OP(insn->code)) { in try_match_pkt_pointers()
14695 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
14696 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
14697 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
14701 dst_reg->type, false); in try_match_pkt_pointers()
14702 mark_pkt_end(other_branch, insn->dst_reg, true); in try_match_pkt_pointers()
14703 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
14704 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
14706 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
14709 src_reg->type, true); in try_match_pkt_pointers()
14710 mark_pkt_end(this_branch, insn->src_reg, false); in try_match_pkt_pointers()
14716 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
14717 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
14718 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
14722 dst_reg->type, true); in try_match_pkt_pointers()
14723 mark_pkt_end(this_branch, insn->dst_reg, false); in try_match_pkt_pointers()
14724 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
14725 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
14727 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
14730 src_reg->type, false); in try_match_pkt_pointers()
14731 mark_pkt_end(other_branch, insn->src_reg, true); in try_match_pkt_pointers()
14737 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
14738 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
14739 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
14743 dst_reg->type, true); in try_match_pkt_pointers()
14744 mark_pkt_end(other_branch, insn->dst_reg, false); in try_match_pkt_pointers()
14745 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
14746 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
14748 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
14751 src_reg->type, false); in try_match_pkt_pointers()
14752 mark_pkt_end(this_branch, insn->src_reg, true); in try_match_pkt_pointers()
14758 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
14759 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
14760 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
14764 dst_reg->type, false); in try_match_pkt_pointers()
14765 mark_pkt_end(this_branch, insn->dst_reg, true); in try_match_pkt_pointers()
14766 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
14767 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
14769 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
14772 src_reg->type, true); in try_match_pkt_pointers()
14773 mark_pkt_end(other_branch, insn->src_reg, false); in try_match_pkt_pointers()
14792 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) in find_equal_scalars()
14800 struct bpf_verifier_state *this_branch = env->cur_state; in check_cond_jmp_op()
14802 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; in check_cond_jmp_op()
14806 u8 opcode = BPF_OP(insn->code); in check_cond_jmp_op()
14808 int pred = -1; in check_cond_jmp_op()
14814 return -EINVAL; in check_cond_jmp_op()
14818 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_cond_jmp_op()
14822 dst_reg = &regs[insn->dst_reg]; in check_cond_jmp_op()
14823 if (BPF_SRC(insn->code) == BPF_X) { in check_cond_jmp_op()
14824 if (insn->imm != 0) { in check_cond_jmp_op()
14826 return -EINVAL; in check_cond_jmp_op()
14830 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_cond_jmp_op()
14834 src_reg = &regs[insn->src_reg]; in check_cond_jmp_op()
14836 is_pointer_value(env, insn->src_reg)) { in check_cond_jmp_op()
14838 insn->src_reg); in check_cond_jmp_op()
14839 return -EACCES; in check_cond_jmp_op()
14842 if (insn->src_reg != BPF_REG_0) { in check_cond_jmp_op()
14844 return -EINVAL; in check_cond_jmp_op()
14847 src_reg->type = SCALAR_VALUE; in check_cond_jmp_op()
14848 __mark_reg_known(src_reg, insn->imm); in check_cond_jmp_op()
14851 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; in check_cond_jmp_op()
14858 err = mark_chain_precision(env, insn->dst_reg); in check_cond_jmp_op()
14859 if (BPF_SRC(insn->code) == BPF_X && !err && in check_cond_jmp_op()
14861 err = mark_chain_precision(env, insn->src_reg); in check_cond_jmp_op()
14867 /* Only follow the goto, ignore fall-through. If needed, push in check_cond_jmp_op()
14868 * the fall-through branch for simulation under speculative in check_cond_jmp_op()
14871 if (!env->bypass_spec_v1 && in check_cond_jmp_op()
14874 return -EFAULT; in check_cond_jmp_op()
14875 if (env->log.level & BPF_LOG_LEVEL) in check_cond_jmp_op()
14876 print_insn_state(env, this_branch->frame[this_branch->curframe]); in check_cond_jmp_op()
14877 *insn_idx += insn->off; in check_cond_jmp_op()
14880 /* Only follow the fall-through branch, since that's where the in check_cond_jmp_op()
14884 if (!env->bypass_spec_v1 && in check_cond_jmp_op()
14886 *insn_idx + insn->off + 1, in check_cond_jmp_op()
14888 return -EFAULT; in check_cond_jmp_op()
14889 if (env->log.level & BPF_LOG_LEVEL) in check_cond_jmp_op()
14890 print_insn_state(env, this_branch->frame[this_branch->curframe]); in check_cond_jmp_op()
14894 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, in check_cond_jmp_op()
14897 return -EFAULT; in check_cond_jmp_op()
14898 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; in check_cond_jmp_op()
14900 if (BPF_SRC(insn->code) == BPF_X) { in check_cond_jmp_op()
14902 &other_branch_regs[insn->dst_reg], in check_cond_jmp_op()
14903 &other_branch_regs[insn->src_reg], in check_cond_jmp_op()
14905 } else /* BPF_SRC(insn->code) == BPF_K */ { in check_cond_jmp_op()
14907 &other_branch_regs[insn->dst_reg], in check_cond_jmp_op()
14915 if (BPF_SRC(insn->code) == BPF_X && in check_cond_jmp_op()
14916 src_reg->type == SCALAR_VALUE && src_reg->id && in check_cond_jmp_op()
14917 !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) { in check_cond_jmp_op()
14919 find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]); in check_cond_jmp_op()
14921 if (dst_reg->type == SCALAR_VALUE && dst_reg->id && in check_cond_jmp_op()
14922 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) { in check_cond_jmp_op()
14924 find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]); in check_cond_jmp_op()
14929 * E.g. register A - maybe null in check_cond_jmp_op()
14930 * register B - not null in check_cond_jmp_op()
14931 * for JNE A, B, ... - A is not null in the false branch; in check_cond_jmp_op()
14932 * for JEQ A, B, ... - A is not null in the true branch. in check_cond_jmp_op()
14939 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_X && in check_cond_jmp_op()
14941 type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type) && in check_cond_jmp_op()
14942 base_type(src_reg->type) != PTR_TO_BTF_ID && in check_cond_jmp_op()
14943 base_type(dst_reg->type) != PTR_TO_BTF_ID) { in check_cond_jmp_op()
14957 if (type_may_be_null(src_reg->type)) in check_cond_jmp_op()
14958 mark_ptr_not_null_reg(&eq_branch_regs[insn->src_reg]); in check_cond_jmp_op()
14960 mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]); in check_cond_jmp_op()
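/* Illustrative sketch (added note, not from the upstream source): if r0
 * holds PTR_TO_MAP_VALUE_OR_NULL and r2 holds a pointer known to be
 * non-NULL, then after "if r0 == r2 goto L" the verifier can drop
 * PTR_MAYBE_NULL from r0 in the taken branch (and in the fall-through
 * branch of "!="), except when either side is PTR_TO_BTF_ID, as checked
 * above.
 */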
14968 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && in check_cond_jmp_op()
14969 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && in check_cond_jmp_op()
14970 type_may_be_null(dst_reg->type)) { in check_cond_jmp_op()
14974 mark_ptr_or_null_regs(this_branch, insn->dst_reg, in check_cond_jmp_op()
14976 mark_ptr_or_null_regs(other_branch, insn->dst_reg, in check_cond_jmp_op()
14978 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], in check_cond_jmp_op()
14980 is_pointer_value(env, insn->dst_reg)) { in check_cond_jmp_op()
14982 insn->dst_reg); in check_cond_jmp_op()
14983 return -EACCES; in check_cond_jmp_op()
14985 if (env->log.level & BPF_LOG_LEVEL) in check_cond_jmp_op()
14986 print_insn_state(env, this_branch->frame[this_branch->curframe]); in check_cond_jmp_op()
14999 if (BPF_SIZE(insn->code) != BPF_DW) { in check_ld_imm()
15001 return -EINVAL; in check_ld_imm()
15003 if (insn->off != 0) { in check_ld_imm()
15005 return -EINVAL; in check_ld_imm()
15008 err = check_reg_arg(env, insn->dst_reg, DST_OP); in check_ld_imm()
15012 dst_reg = &regs[insn->dst_reg]; in check_ld_imm()
15013 if (insn->src_reg == 0) { in check_ld_imm()
15014 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; in check_ld_imm()
15016 dst_reg->type = SCALAR_VALUE; in check_ld_imm()
15017 __mark_reg_known(&regs[insn->dst_reg], imm); in check_ld_imm()
15022 * we either succeed and assign a corresponding dst_reg->type after in check_ld_imm()
15025 mark_reg_known_zero(env, regs, insn->dst_reg); in check_ld_imm()
15027 if (insn->src_reg == BPF_PSEUDO_BTF_ID) { in check_ld_imm()
15028 dst_reg->type = aux->btf_var.reg_type; in check_ld_imm()
15029 switch (base_type(dst_reg->type)) { in check_ld_imm()
15031 dst_reg->mem_size = aux->btf_var.mem_size; in check_ld_imm()
15034 dst_reg->btf = aux->btf_var.btf; in check_ld_imm()
15035 dst_reg->btf_id = aux->btf_var.btf_id; in check_ld_imm()
15039 return -EFAULT; in check_ld_imm()
15044 if (insn->src_reg == BPF_PSEUDO_FUNC) { in check_ld_imm()
15045 struct bpf_prog_aux *aux = env->prog->aux; in check_ld_imm()
15047 env->insn_idx + insn->imm + 1); in check_ld_imm()
15049 if (!aux->func_info) { in check_ld_imm()
15051 return -EINVAL; in check_ld_imm()
15053 if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) { in check_ld_imm()
15055 return -EINVAL; in check_ld_imm()
15058 dst_reg->type = PTR_TO_FUNC; in check_ld_imm()
15059 dst_reg->subprogno = subprogno; in check_ld_imm()
15063 map = env->used_maps[aux->map_index]; in check_ld_imm()
15064 dst_reg->map_ptr = map; in check_ld_imm()
15066 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE || in check_ld_imm()
15067 insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) { in check_ld_imm()
15068 dst_reg->type = PTR_TO_MAP_VALUE; in check_ld_imm()
15069 dst_reg->off = aux->map_off; in check_ld_imm()
15070 WARN_ON_ONCE(map->max_entries != 1); in check_ld_imm()
15071 /* We want reg->id to stay 0, since this map_value is not a distinct object */ in check_ld_imm()
15072 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD || in check_ld_imm()
15073 insn->src_reg == BPF_PSEUDO_MAP_IDX) { in check_ld_imm()
15074 dst_reg->type = CONST_PTR_TO_MAP; in check_ld_imm()
15077 return -EINVAL; in check_ld_imm()
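/* Summary note (added for readability, not in the upstream source):
 * the resulting dst_reg->type depends on insn->src_reg as handled above:
 * 0 -> known 64-bit SCALAR_VALUE, BPF_PSEUDO_BTF_ID -> the kernel
 * variable's type from insn_aux, BPF_PSEUDO_FUNC -> PTR_TO_FUNC of a
 * static subprog, BPF_PSEUDO_MAP_VALUE/IDX_VALUE -> PTR_TO_MAP_VALUE of a
 * single-entry map, BPF_PSEUDO_MAP_FD/IDX -> CONST_PTR_TO_MAP; anything
 * else is rejected with -EINVAL.
 */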
15096 * - they can only appear in the programs where ctx == skb
15097 * - since they are wrappers of function calls, they scratch R1-R5 registers,
15098 * preserve R6-R9, and store return value into R0
15105 * IMM == 32-bit immediate
15108 * R0 - 8/16/32-bit skb data converted to cpu endianness
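*
* For example (illustrative, not part of the upstream comment):
* BPF_LD_ABS(BPF_H, 12) loads the 16-bit half-word at skb byte offset 12
* (the EtherType field of a plain Ethernet frame) into R0, converted to
* CPU endianness.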
15114 u8 mode = BPF_MODE(insn->code); in check_ld_abs()
15117 if (!may_access_skb(resolve_prog_type(env->prog))) { in check_ld_abs()
15119 return -EINVAL; in check_ld_abs()
15122 if (!env->ops->gen_ld_abs) { in check_ld_abs()
15124 return -EINVAL; in check_ld_abs()
15127 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || in check_ld_abs()
15128 BPF_SIZE(insn->code) == BPF_DW || in check_ld_abs()
15129 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { in check_ld_abs()
15131 return -EINVAL; in check_ld_abs()
15149 if (env->cur_state->active_lock.ptr) { in check_ld_abs()
15150 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n"); in check_ld_abs()
15151 return -EINVAL; in check_ld_abs()
15154 if (env->cur_state->active_rcu_lock) { in check_ld_abs()
15155 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_rcu_read_lock-ed region\n"); in check_ld_abs()
15156 return -EINVAL; in check_ld_abs()
15162 return -EINVAL; in check_ld_abs()
15167 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_ld_abs()
15187 /* ld_abs loads up to 32 bits of skb data. */ in check_ld_abs()
15188 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; in check_ld_abs()
15196 const struct bpf_prog *prog = env->prog; in check_return_code()
15199 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_return_code()
15201 struct bpf_func_state *frame = env->cur_state->frame[0]; in check_return_code()
15202 const bool is_subprog = frame->subprogno; in check_return_code()
15204 /* LSM and struct_ops func-ptr's return type could be "void" */ in check_return_code()
15205 if (!is_subprog || frame->in_exception_callback_fn) { in check_return_code()
15208 if (prog->expected_attach_type == BPF_LSM_CGROUP) in check_return_code()
15209 /* See below, can be 0 or 0-1 depending on hook. */ in check_return_code()
15213 if (!prog->aux->attach_func_proto->type) in check_return_code()
15233 return -EACCES; in check_return_code()
15238 if (frame->in_async_callback_fn) { in check_return_code()
15245 if (is_subprog && !frame->in_exception_callback_fn) { in check_return_code()
15246 if (reg->type != SCALAR_VALUE) { in check_return_code()
15248 regno, reg_type_str(env, reg->type)); in check_return_code()
15249 return -EINVAL; in check_return_code()
15256 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || in check_return_code()
15257 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || in check_return_code()
15258 env->prog->expected_attach_type == BPF_CGROUP_UNIX_RECVMSG || in check_return_code()
15259 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || in check_return_code()
15260 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || in check_return_code()
15261 env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETPEERNAME || in check_return_code()
15262 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || in check_return_code()
15263 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME || in check_return_code()
15264 env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETSOCKNAME) in check_return_code()
15266 if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND || in check_return_code()
15267 env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND) in check_return_code()
15271 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { in check_return_code()
15283 if (!env->prog->aux->attach_btf_id) in check_return_code()
15288 switch (env->prog->expected_attach_type) { in check_return_code()
15299 return -ENOTSUPP; in check_return_code()
15307 if (env->prog->expected_attach_type != BPF_LSM_CGROUP) { in check_return_code()
15313 if (!env->prog->aux->attach_func_proto->type) { in check_return_code()
15326 * depends on the to-be-replaced kernel func or bpf program. in check_return_code()
15333 if (reg->type != SCALAR_VALUE) { in check_return_code()
15335 exit_ctx, regno, reg_type_str(env, reg->type)); in check_return_code()
15336 return -EINVAL; in check_return_code()
15346 prog->expected_attach_type == BPF_LSM_CGROUP && in check_return_code()
15348 !prog->aux->attach_func_proto->type) in check_return_code()
15350 return -EINVAL; in check_return_code()
15354 tnum_in(enforce_attach_type_range, reg->var_off)) in check_return_code()
15355 env->prog->enforce_expected_attach_type = 1; in check_return_code()
15359 /* non-recursive DFS pseudo code
15360 * 1 procedure DFS-iterative(G,v):
15365 * 6 t <- S.peek()
15369 * 10 if edge e is already labelled
15370 * 11 continue with the next edge
15371 * 12 w <- G.adjacentVertex(t,e)
15373 * 14 label e as tree-edge
15378 * 19 label e as back-edge
15381 * 22 label e as forward- or cross-edge
15386 * 0x10 - discovered
15387 * 0x11 - discovered and fall-through edge labelled
15388 * 0x12 - discovered and fall-through and branch edges labelled
15389 * 0x20 - explored
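*
* Illustrative walk-through (added note, not part of the upstream comment):
* a conditional jump insn is marked 0x10 when first pushed, becomes 0x11
* once its fall-through successor has been pushed, 0x12 once its branch
* target has been pushed as well, and finally 0x20 when it is popped with
* both edges explored.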
15401 env->insn_aux_data[idx].prune_point = true; in mark_prune_point()
15406 return env->insn_aux_data[insn_idx].prune_point; in is_prune_point()
15411 env->insn_aux_data[idx].force_checkpoint = true; in mark_force_checkpoint()
15416 return env->insn_aux_data[insn_idx].force_checkpoint; in is_force_checkpoint()
15421 env->insn_aux_data[idx].calls_callback = true; in mark_calls_callback()
15426 return env->insn_aux_data[insn_idx].calls_callback; in calls_callback()
15434 /* t, w, e - match pseudo-code above:
15435 * t - index of current instruction
15436 * w - next instruction
15437 * e - edge
15441 int *insn_stack = env->cfg.insn_stack; in push_insn()
15442 int *insn_state = env->cfg.insn_state; in push_insn()
15450 if (w < 0 || w >= env->prog->len) { in push_insn()
15453 return -EINVAL; in push_insn()
15463 /* tree-edge */ in push_insn()
15466 if (env->cfg.cur_stack >= env->prog->len) in push_insn()
15467 return -E2BIG; in push_insn()
15468 insn_stack[env->cfg.cur_stack++] = w; in push_insn()
15471 if (env->bpf_capable) in push_insn()
15475 verbose(env, "back-edge from insn %d to %d\n", t, w); in push_insn()
15476 return -EINVAL; in push_insn()
15478 /* forward- or cross-edge */ in push_insn()
15482 return -EFAULT; in push_insn()
15499 /* when we exit from subprog, we need to record non-linear history */ in visit_func_call_insn()
15510 * < 0 - an error occurred
15511 * DONE_EXPLORING - the instruction was fully explored
15512 * KEEP_EXPLORING - there is still work to be done before it is fully explored
15516 struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t]; in visit_insn()
15522 /* All non-branch instructions have a single fall-through edge. */ in visit_insn()
15523 if (BPF_CLASS(insn->code) != BPF_JMP && in visit_insn()
15524 BPF_CLASS(insn->code) != BPF_JMP32) { in visit_insn()
15529 switch (BPF_OP(insn->code)) { in visit_insn()
15534 if (insn->src_reg == 0 && insn->imm == BPF_FUNC_timer_set_callback) in visit_insn()
15556 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { in visit_insn()
15563 * is crucial for fast convergence of open-coded iterator loop in visit_insn()
15569 * It is expected that with correct open-coded iterators in visit_insn()
15576 return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL); in visit_insn()
15579 if (BPF_SRC(insn->code) != BPF_K) in visit_insn()
15580 return -EINVAL; in visit_insn()
15582 if (BPF_CLASS(insn->code) == BPF_JMP) in visit_insn()
15583 off = insn->off; in visit_insn()
15585 off = insn->imm; in visit_insn()
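/* Added note: BPF_JMP class BPF_JA encodes its jump offset in the 16-bit
 * insn->off, while the BPF_JMP32 class form ("gotol") carries a 32-bit
 * offset in insn->imm, which is why the target is taken from a different
 * field above.
 */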
15587 /* unconditional jump with single edge */ in visit_insn()
15605 return push_insn(t, t + insn->off + 1, BRANCH, env); in visit_insn()
15609 /* non-recursive depth-first-search to detect loops in BPF program
15610 * loop == back-edge in directed graph
15614 int insn_cnt = env->prog->len; in check_cfg()
15619 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); in check_cfg()
15621 return -ENOMEM; in check_cfg()
15623 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); in check_cfg()
15626 return -ENOMEM; in check_cfg()
15631 env->cfg.cur_stack = 1; in check_cfg()
15634 while (env->cfg.cur_stack > 0) { in check_cfg()
15635 int t = insn_stack[env->cfg.cur_stack - 1]; in check_cfg()
15641 env->cfg.cur_stack--; in check_cfg()
15648 ret = -EFAULT; in check_cfg()
15654 if (env->cfg.cur_stack < 0) { in check_cfg()
15656 ret = -EFAULT; in check_cfg()
15660 if (env->exception_callback_subprog && !ex_done) { in check_cfg()
15661 ex_insn_beg = env->subprog_info[env->exception_callback_subprog].start; in check_cfg()
15665 env->cfg.cur_stack = 1; in check_cfg()
15671 struct bpf_insn *insn = &env->prog->insnsi[i]; in check_cfg()
15675 ret = -EINVAL; in check_cfg()
15681 ret = -EINVAL; in check_cfg()
15692 env->cfg.insn_state = env->cfg.insn_stack = NULL; in check_cfg()
15700 for (i = 1; i < env->subprog_cnt; i++) { in check_abnormal_return()
15701 if (env->subprog_info[i].has_ld_abs) { in check_abnormal_return()
15703 return -EINVAL; in check_abnormal_return()
15705 if (env->subprog_info[i].has_tail_call) { in check_abnormal_return()
15707 return -EINVAL; in check_abnormal_return()
15729 int ret = -ENOMEM; in check_btf_func_early()
15731 nfuncs = attr->func_info_cnt; in check_btf_func_early()
15734 return -EINVAL; in check_btf_func_early()
15738 urec_size = attr->func_info_rec_size; in check_btf_func_early()
15743 return -EINVAL; in check_btf_func_early()
15746 prog = env->prog; in check_btf_func_early()
15747 btf = prog->aux->btf; in check_btf_func_early()
15749 urecord = make_bpfptr(attr->func_info, uattr.is_kernel); in check_btf_func_early()
15754 return -ENOMEM; in check_btf_func_early()
15759 if (ret == -E2BIG) { in check_btf_func_early()
15767 ret = -EFAULT; in check_btf_func_early()
15773 ret = -EFAULT; in check_btf_func_early()
15778 ret = -EINVAL; in check_btf_func_early()
15801 func_proto = btf_type_by_id(btf, type->type); in check_btf_func_early()
15810 prog->aux->func_info = krecord; in check_btf_func_early()
15811 prog->aux->func_info_cnt = nfuncs; in check_btf_func_early()
15831 int ret = -ENOMEM; in check_btf_func()
15833 nfuncs = attr->func_info_cnt; in check_btf_func()
15836 return -EINVAL; in check_btf_func()
15839 if (nfuncs != env->subprog_cnt) { in check_btf_func()
15841 return -EINVAL; in check_btf_func()
15844 urec_size = attr->func_info_rec_size; in check_btf_func()
15846 prog = env->prog; in check_btf_func()
15847 btf = prog->aux->btf; in check_btf_func()
15849 urecord = make_bpfptr(attr->func_info, uattr.is_kernel); in check_btf_func()
15851 krecord = prog->aux->func_info; in check_btf_func()
15854 return -ENOMEM; in check_btf_func()
15858 ret = -EINVAL; in check_btf_func()
15860 if (env->subprog_info[i].start != krecord[i].insn_off) { in check_btf_func()
15867 info_aux[i].linkage = BTF_INFO_VLEN(type->info); in check_btf_func()
15869 func_proto = btf_type_by_id(btf, type->type); in check_btf_func()
15871 ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL); in check_btf_func()
15874 if (i && !scalar_return && env->subprog_info[i].has_ld_abs) { in check_btf_func()
15878 if (i && !scalar_return && env->subprog_info[i].has_tail_call) { in check_btf_func()
15886 prog->aux->func_info_aux = info_aux; in check_btf_func()
15896 struct bpf_prog_aux *aux = env->prog->aux; in adjust_btf_func()
15899 if (!aux->func_info) in adjust_btf_func()
15903 for (i = 0; i < env->subprog_cnt - env->hidden_subprog_cnt; i++) in adjust_btf_func()
15904 aux->func_info[i].insn_off = env->subprog_info[i].start; in adjust_btf_func()
15922 nr_linfo = attr->line_info_cnt; in check_btf_line()
15926 return -EINVAL; in check_btf_line()
15928 rec_size = attr->line_info_rec_size; in check_btf_line()
15931 rec_size & (sizeof(u32) - 1)) in check_btf_line()
15932 return -EINVAL; in check_btf_line()
15940 return -ENOMEM; in check_btf_line()
15942 prog = env->prog; in check_btf_line()
15943 btf = prog->aux->btf; in check_btf_line()
15946 sub = env->subprog_info; in check_btf_line()
15947 ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel); in check_btf_line()
15953 if (err == -E2BIG) { in check_btf_line()
15958 err = -EFAULT; in check_btf_line()
15964 err = -EFAULT; in check_btf_line()
15971 * 2) bounded by prog->len in check_btf_line()
15980 linfo[i].insn_off >= prog->len) { in check_btf_line()
15981 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", in check_btf_line()
15983 prog->len); in check_btf_line()
15984 err = -EINVAL; in check_btf_line()
15988 if (!prog->insnsi[linfo[i].insn_off].code) { in check_btf_line()
15992 err = -EINVAL; in check_btf_line()
15999 err = -EINVAL; in check_btf_line()
16003 if (s != env->subprog_cnt) { in check_btf_line()
16009 err = -EINVAL; in check_btf_line()
16018 if (s != env->subprog_cnt) { in check_btf_line()
16020 env->subprog_cnt - s, s); in check_btf_line()
16021 err = -EINVAL; in check_btf_line()
16025 prog->aux->linfo = linfo; in check_btf_line()
16026 prog->aux->nr_linfo = nr_linfo; in check_btf_line()
16044 struct bpf_prog *prog = env->prog; in check_core_relo()
16045 const struct btf *btf = prog->aux->btf; in check_core_relo()
16047 .log = &env->log, in check_core_relo()
16053 nr_core_relo = attr->core_relo_cnt; in check_core_relo()
16057 return -EINVAL; in check_core_relo()
16059 rec_size = attr->core_relo_rec_size; in check_core_relo()
16063 return -EINVAL; in check_core_relo()
16065 u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel); in check_core_relo()
16069 /* Unlike func_info and line_info, copy and apply each CO-RE in check_core_relo()
16076 if (err == -E2BIG) { in check_core_relo()
16081 err = -EFAULT; in check_core_relo()
16087 err = -EFAULT; in check_core_relo()
16091 if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) { in check_core_relo()
16092 verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n", in check_core_relo()
16093 i, core_relo.insn_off, prog->len); in check_core_relo()
16094 err = -EINVAL; in check_core_relo()
16099 &prog->insnsi[core_relo.insn_off / 8]); in check_core_relo()
16114 if (!attr->func_info_cnt && !attr->line_info_cnt) { in check_btf_info_early()
16116 return -EINVAL; in check_btf_info_early()
16120 btf = btf_get_by_fd(attr->prog_btf_fd); in check_btf_info_early()
16125 return -EACCES; in check_btf_info_early()
16127 env->prog->aux->btf = btf; in check_btf_info_early()
16141 if (!attr->func_info_cnt && !attr->line_info_cnt) { in check_btf_info()
16143 return -EINVAL; in check_btf_info()
16166 return old->umin_value <= cur->umin_value && in range_within()
16167 old->umax_value >= cur->umax_value && in range_within()
16168 old->smin_value <= cur->smin_value && in range_within()
16169 old->smax_value >= cur->smax_value && in range_within()
16170 old->u32_min_value <= cur->u32_min_value && in range_within()
16171 old->u32_max_value >= cur->u32_max_value && in range_within()
16172 old->s32_min_value <= cur->s32_min_value && in range_within()
16173 old->s32_max_value >= cur->s32_max_value; in range_within()
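/* Illustrative sketch (added note, not from the upstream source): an old
 * range of [0, 100] subsumes a current range of [10, 50], so the check
 * passes; a current range of [10, 200] does not, since 200 exceeds the
 * old maximums that the already-verified path was checked against.
 */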
16188 struct bpf_id_pair *map = idmap->map; in check_ids()
16221 old_id = old_id ? old_id : ++idmap->tmp_id_gen; in check_scalar_ids()
16222 cur_id = cur_id ? cur_id : ++idmap->tmp_id_gen; in check_scalar_ids()
16234 live = st->regs[i].live; in clean_func_state()
16236 st->regs[i].live |= REG_LIVE_DONE; in clean_func_state()
16241 __mark_reg_not_init(env, &st->regs[i]); in clean_func_state()
16244 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { in clean_func_state()
16245 live = st->stack[i].spilled_ptr.live; in clean_func_state()
16247 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; in clean_func_state()
16249 __mark_reg_not_init(env, &st->stack[i].spilled_ptr); in clean_func_state()
16251 st->stack[i].slot_type[j] = STACK_INVALID; in clean_func_state()
16261 if (st->frame[0]->regs[0].live & REG_LIVE_DONE) in clean_verifier_state()
16265 for (i = 0; i <= st->curframe; i++) in clean_verifier_state()
16266 clean_func_state(env, st->frame[i]); in clean_verifier_state()
16308 if (sl->state.branches) in clean_live_states()
16310 if (sl->state.insn_idx != insn || in clean_live_states()
16311 !same_callsites(&sl->state, cur)) in clean_live_states()
16313 clean_verifier_state(env, &sl->state); in clean_live_states()
16315 sl = sl->next; in clean_live_states()
16324 check_ids(rold->id, rcur->id, idmap) && in regs_exact()
16325 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap); in regs_exact()
16335 if (!(rold->live & REG_LIVE_READ)) in regsafe()
16338 if (rold->type == NOT_INIT) in regsafe()
16341 if (rcur->type == NOT_INIT) in regsafe()
16361 * a non-MAYBE_NULL variant. in regsafe()
16363 * non-MAYBE_NULL registers as well. in regsafe()
16365 if (rold->type != rcur->type) in regsafe()
16368 switch (base_type(rold->type)) { in regsafe()
16370 if (env->explore_alu_limits) { in regsafe()
16375 check_scalar_ids(rold->id, rcur->id, idmap); in regsafe()
16377 if (!rold->precise) in regsafe()
16389 * First verification path is [1-6]: in regsafe()
16390 * - at (4) same bpf_reg_state::id (b) would be assigned to r6 and r7; in regsafe()
16391 * - at (5) r6 would be marked <= X, find_equal_scalars() would also mark in regsafe()
16393 * Next verification path is [1-4, 6]. in regsafe()
16396 * I. r6{.id=b}, r7{.id=b} via path 1-6; in regsafe()
16397 * II. r6{.id=a}, r7{.id=b} via path 1-4, 6. in regsafe()
16400 * --- in regsafe()
16404 tnum_in(rold->var_off, rcur->var_off) && in regsafe()
16405 check_scalar_ids(rold->id, rcur->id, idmap); in regsafe()
16416 tnum_in(rold->var_off, rcur->var_off) && in regsafe()
16417 check_ids(rold->id, rcur->id, idmap) && in regsafe()
16418 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap); in regsafe()
16424 * since someone could have accessed through (ptr - k), or in regsafe()
16425 * even done ptr -= k in a register, to get a safe access. in regsafe()
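*
* Illustrative sketch (added note, not part of the upstream comment): if the
* old, already-verified state had range 16 (accesses up to pkt + 16 proven
* in-bounds) but the current state only has range 8, pruning here could let
* the current path reuse a verification containing a 16-byte access it never
* proved safe, hence the rejection below when rold->range > rcur->range.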
16427 if (rold->range > rcur->range) in regsafe()
16432 if (rold->off != rcur->off) in regsafe()
16435 if (!check_ids(rold->id, rcur->id, idmap)) in regsafe()
16439 tnum_in(rold->var_off, rcur->var_off); in regsafe()
16442 * the same stack frame, since fp-8 in foo != fp-8 in bar in regsafe()
16444 return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno; in regsafe()
16459 for (i = 0; i < old->allocated_stack; i++) { in stacksafe()
16465 old->stack[spi].slot_type[i % BPF_REG_SIZE] != in stacksafe()
16466 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) in stacksafe()
16469 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ) && !exact) { in stacksafe()
16470 i += BPF_REG_SIZE - 1; in stacksafe()
16475 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) in stacksafe()
16478 if (env->allow_uninit_stack && in stacksafe()
16479 old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC) in stacksafe()
16485 if (i >= cur->allocated_stack) in stacksafe()
16489 * it will be safe with zero-initialized stack. in stacksafe()
16492 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && in stacksafe()
16493 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) in stacksafe()
16495 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != in stacksafe()
16496 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) in stacksafe()
16498 * this stack slot, but current has STACK_MISC -> in stacksafe()
16503 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) in stacksafe()
16506 switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) { in stacksafe()
16512 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} in stacksafe()
16514 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} in stacksafe()
16518 if (!regsafe(env, &old->stack[spi].spilled_ptr, in stacksafe()
16519 &cur->stack[spi].spilled_ptr, idmap, exact)) in stacksafe()
16523 old_reg = &old->stack[spi].spilled_ptr; in stacksafe()
16524 cur_reg = &cur->stack[spi].spilled_ptr; in stacksafe()
16525 if (old_reg->dynptr.type != cur_reg->dynptr.type || in stacksafe()
16526 old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot || in stacksafe()
16527 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) in stacksafe()
16531 old_reg = &old->stack[spi].spilled_ptr; in stacksafe()
16532 cur_reg = &cur->stack[spi].spilled_ptr; in stacksafe()
16539 if (old_reg->iter.btf != cur_reg->iter.btf || in stacksafe()
16540 old_reg->iter.btf_id != cur_reg->iter.btf_id || in stacksafe()
16541 old_reg->iter.state != cur_reg->iter.state || in stacksafe()
16542 /* ignore {old_reg,cur_reg}->iter.depth, see above */ in stacksafe()
16543 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) in stacksafe()
16563 if (old->acquired_refs != cur->acquired_refs) in refsafe()
16566 for (i = 0; i < old->acquired_refs; i++) { in refsafe()
16567 if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap)) in refsafe()
16605 if (old->callback_depth > cur->callback_depth) in func_states_equal()
16609 if (!regsafe(env, &old->regs[i], &cur->regs[i], in func_states_equal()
16610 &env->idmap_scratch, exact)) in func_states_equal()
16613 if (!stacksafe(env, old, cur, &env->idmap_scratch, exact)) in func_states_equal()
16616 if (!refsafe(old, cur, &env->idmap_scratch)) in func_states_equal()
16624 env->idmap_scratch.tmp_id_gen = env->id_gen; in reset_idmap_scratch()
16625 memset(&env->idmap_scratch.map, 0, sizeof(env->idmap_scratch.map)); in reset_idmap_scratch()
16635 if (old->curframe != cur->curframe) in states_equal()
16641 * must never prune a non-speculative execution one. in states_equal()
16643 if (old->speculative && !cur->speculative) in states_equal()
16646 if (old->active_lock.ptr != cur->active_lock.ptr) in states_equal()
16652 if (!!old->active_lock.id != !!cur->active_lock.id) in states_equal()
16655 if (old->active_lock.id && in states_equal()
16656 !check_ids(old->active_lock.id, cur->active_lock.id, &env->idmap_scratch)) in states_equal()
16659 if (old->active_rcu_lock != cur->active_rcu_lock) in states_equal()
16665 for (i = 0; i <= old->curframe; i++) { in states_equal()
16666 if (old->frame[i]->callsite != cur->frame[i]->callsite) in states_equal()
16668 if (!func_states_equal(env, old->frame[i], cur->frame[i], exact)) in states_equal()
16681 u8 parent_flag = parent_reg->live & REG_LIVE_READ; in propagate_liveness_reg()
16682 u8 flag = reg->live & REG_LIVE_READ; in propagate_liveness_reg()
16704 * straight-line code between a state and its parent. When we arrive at an
16705 * equivalent state (jump target or such) we didn't arrive by the straight-line
16707 * of the state's write marks. That's what 'parent == state->parent' comparison
16718 if (vparent->curframe != vstate->curframe) { in propagate_liveness()
16720 vparent->curframe, vstate->curframe); in propagate_liveness()
16721 return -EFAULT; in propagate_liveness()
16725 for (frame = 0; frame <= vstate->curframe; frame++) { in propagate_liveness()
16726 parent = vparent->frame[frame]; in propagate_liveness()
16727 state = vstate->frame[frame]; in propagate_liveness()
16728 parent_reg = parent->regs; in propagate_liveness()
16729 state_reg = state->regs; in propagate_liveness()
16730 /* We don't need to worry about FP liveness, it's read-only */ in propagate_liveness()
16731 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { in propagate_liveness()
16741 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && in propagate_liveness()
16742 i < parent->allocated_stack / BPF_REG_SIZE; i++) { in propagate_liveness()
16743 parent_reg = &parent->stack[i].spilled_ptr; in propagate_liveness()
16744 state_reg = &state->stack[i].spilled_ptr; in propagate_liveness()
16765 for (fr = old->curframe; fr >= 0; fr--) { in propagate_precision()
16766 state = old->frame[fr]; in propagate_precision()
16767 state_reg = state->regs; in propagate_precision()
16770 if (state_reg->type != SCALAR_VALUE || in propagate_precision()
16771 !state_reg->precise || in propagate_precision()
16772 !(state_reg->live & REG_LIVE_READ)) in propagate_precision()
16774 if (env->log.level & BPF_LOG_LEVEL2) { in propagate_precision()
16780 bt_set_frame_reg(&env->bt, fr, i); in propagate_precision()
16784 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in propagate_precision()
16785 if (!is_spilled_reg(&state->stack[i])) in propagate_precision()
16787 state_reg = &state->stack[i].spilled_ptr; in propagate_precision()
16788 if (state_reg->type != SCALAR_VALUE || in propagate_precision()
16789 !state_reg->precise || in propagate_precision()
16790 !(state_reg->live & REG_LIVE_READ)) in propagate_precision()
16792 if (env->log.level & BPF_LOG_LEVEL2) { in propagate_precision()
16795 fr, (-i - 1) * BPF_REG_SIZE); in propagate_precision()
16797 verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE); in propagate_precision()
16799 bt_set_frame_slot(&env->bt, fr, i); in propagate_precision()
16817 int i, fr = cur->curframe; in states_maybe_looping()
16819 if (old->curframe != fr) in states_maybe_looping()
16822 fold = old->frame[fr]; in states_maybe_looping()
16823 fcur = cur->frame[fr]; in states_maybe_looping()
16825 if (memcmp(&fold->regs[i], &fcur->regs[i], in states_maybe_looping()
16833 return env->insn_aux_data[insn_idx].is_iter_next; in is_iter_next_insn()
16843 * Here's a situation in pseudo-BPF assembly form:
16863 * 3-5, come to goto, jump to 1:. Let's assume our state didn't change, so we
16870 * another ACTIVE iteration, we bump slot->iter.depth, to mark that it's
16890 * while (x--) {} // <<-- infinite loop here
16900 for (fr = old->curframe; fr >= 0; fr--) { in iter_active_depths_differ()
16901 state = old->frame[fr]; in iter_active_depths_differ()
16902 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in iter_active_depths_differ()
16903 if (state->stack[i].slot_type[0] != STACK_ITER) in iter_active_depths_differ()
16906 slot = &state->stack[i].spilled_ptr; in iter_active_depths_differ()
16907 if (slot->iter.state != BPF_ITER_STATE_ACTIVE) in iter_active_depths_differ()
16910 cur_slot = &cur->frame[fr]->stack[i].spilled_ptr; in iter_active_depths_differ()
16911 if (cur_slot->iter.depth != slot->iter.depth) in iter_active_depths_differ()
16922 struct bpf_verifier_state *cur = env->cur_state, *new, *loop_entry; in is_state_visited()
16924 bool force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx); in is_state_visited()
16929 * http://vger.kernel.org/bpfconf2019.html#session-1 in is_state_visited()
16936 if (env->jmps_processed - env->prev_jmps_processed >= 2 && in is_state_visited()
16937 env->insn_processed - env->prev_insn_processed >= 8) in is_state_visited()
16947 if (sl->state.insn_idx != insn_idx) in is_state_visited()
16950 if (sl->state.branches) { in is_state_visited()
16951 struct bpf_func_state *frame = sl->state.frame[sl->state.curframe]; in is_state_visited()
16953 if (frame->in_async_callback_fn && in is_state_visited()
16954 frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) { in is_state_visited()
16968 /* BPF open-coded iterators loop detection is special. in is_state_visited()
16984 * 1. r7 = -16 in is_state_visited()
16986 * 3. while (bpf_iter_num_next(&fp[-8])) { in is_state_visited()
16988 * 5. r7 = -32 in is_state_visited()
16998 * Here verifier would first visit path 1-3, create a checkpoint at 3 in is_state_visited()
16999 * with r7=-16, continue to 4-7,3. Existing checkpoint at 3 does in is_state_visited()
17001 * comparison would discard current state with r7=-32 in is_state_visited()
17005 if (states_equal(env, &sl->state, cur, true)) { in is_state_visited()
17010 cur_frame = cur->frame[cur->curframe]; in is_state_visited()
17014 iter_reg = &cur_frame->regs[BPF_REG_1]; in is_state_visited()
17017 * no need for extra (re-)validations in is_state_visited()
17019 spi = __get_spi(iter_reg->off + iter_reg->var_off.value); in is_state_visited()
17020 iter_state = &func(env, iter_reg)->stack[spi].spilled_ptr; in is_state_visited()
17021 if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE) { in is_state_visited()
17022 update_loop_entry(cur, &sl->state); in is_state_visited()
17029 if (states_equal(env, &sl->state, cur, true)) in is_state_visited()
17034 if (states_maybe_looping(&sl->state, cur) && in is_state_visited()
17035 states_equal(env, &sl->state, cur, false) && in is_state_visited()
17036 !iter_active_depths_differ(&sl->state, cur) && in is_state_visited()
17037 sl->state.callback_unroll_depth == cur->callback_unroll_depth) { in is_state_visited()
17041 print_verifier_state(env, cur->frame[cur->curframe], true); in is_state_visited()
17043 print_verifier_state(env, sl->state.frame[cur->curframe], true); in is_state_visited()
17044 return -EINVAL; in is_state_visited()
17053 * if r1 < 1000000 goto pc-2 in is_state_visited()
17060 env->jmps_processed - env->prev_jmps_processed < 20 && in is_state_visited()
17061 env->insn_processed - env->prev_insn_processed < 100) in is_state_visited()
17065 /* If sl->state is a part of a loop and this loop's entry is a part of in is_state_visited()
17072 * .---------> hdr All branches from 'succ' had been explored in is_state_visited()
17075 * | .------... Suppose states 'cur' and 'succ' correspond in is_state_visited()
17081 * | succ <- cur To check if that is the case, verify in is_state_visited()
17086 * '----' in is_state_visited()
17090 loop_entry = get_loop_entry(&sl->state); in is_state_visited()
17091 force_exact = loop_entry && loop_entry->branches > 0; in is_state_visited()
17092 if (states_equal(env, &sl->state, cur, force_exact)) { in is_state_visited()
17096 sl->hit_cnt++; in is_state_visited()
17100 * If we have any write marks in env->cur_state, they in is_state_visited()
17107 err = propagate_liveness(env, &sl->state, cur); in is_state_visited()
17114 if (is_jmp_point(env, env->insn_idx)) in is_state_visited()
17116 err = err ? : propagate_precision(env, &sl->state); in is_state_visited()
17129 sl->miss_cnt++; in is_state_visited()
17138 n = is_force_checkpoint(env, insn_idx) && sl->state.branches > 0 ? 64 : 3; in is_state_visited()
17139 if (sl->miss_cnt > sl->hit_cnt * n + n) { in is_state_visited()
17143 *pprev = sl->next; in is_state_visited()
17144 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE && in is_state_visited()
17145 !sl->state.used_as_loop_entry) { in is_state_visited()
17146 u32 br = sl->state.branches; in is_state_visited()
17151 free_verifier_state(&sl->state, false); in is_state_visited()
17153 env->peak_states--; in is_state_visited()
17159 sl->next = env->free_list; in is_state_visited()
17160 env->free_list = sl; in is_state_visited()
17166 pprev = &sl->next; in is_state_visited()
17170 if (env->max_states_per_insn < states_cnt) in is_state_visited()
17171 env->max_states_per_insn = states_cnt; in is_state_visited()
17173 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) in is_state_visited()
17185 * When looping the sl->state.branches will be > 0 and this state in is_state_visited()
17190 return -ENOMEM; in is_state_visited()
17191 env->total_states++; in is_state_visited()
17192 env->peak_states++; in is_state_visited()
17193 env->prev_jmps_processed = env->jmps_processed; in is_state_visited()
17194 env->prev_insn_processed = env->insn_processed; in is_state_visited()
17197 if (env->bpf_capable) in is_state_visited()
17201 new = &new_sl->state; in is_state_visited()
17208 new->insn_idx = insn_idx; in is_state_visited()
17209 WARN_ONCE(new->branches != 1, in is_state_visited()
17210 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx); in is_state_visited()
17212 cur->parent = new; in is_state_visited()
17213 cur->first_insn_idx = insn_idx; in is_state_visited()
17214 cur->dfs_depth = new->dfs_depth + 1; in is_state_visited()
17216 new_sl->next = *explored_state(env, insn_idx); in is_state_visited()
17219 * registers connected. Only r6 - r9 of the callers are alive (pushed in is_state_visited()
17221 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to in is_state_visited()
17231 for (j = 0; j <= cur->curframe; j++) { in is_state_visited()
17232 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) in is_state_visited()
17233 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; in is_state_visited()
17235 cur->frame[j]->regs[i].live = REG_LIVE_NONE; in is_state_visited()
17239 for (j = 0; j <= cur->curframe; j++) { in is_state_visited()
17240 struct bpf_func_state *frame = cur->frame[j]; in is_state_visited()
17241 struct bpf_func_state *newframe = new->frame[j]; in is_state_visited()
17243 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { in is_state_visited()
17244 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; in is_state_visited()
17245 frame->stack[i].spilled_ptr.parent = in is_state_visited()
17246 &newframe->stack[i].spilled_ptr; in is_state_visited()
17289 enum bpf_reg_type *prev_type = &env->insn_aux_data[env->insn_idx].ptr_type; in save_aux_ptr_type()
17317 return -EINVAL; in save_aux_ptr_type()
17326 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); in do_check()
17327 struct bpf_verifier_state *state = env->cur_state; in do_check()
17328 struct bpf_insn *insns = env->prog->insnsi; in do_check()
17330 int insn_cnt = env->prog->len; in do_check()
17332 int prev_insn_idx = -1; in do_check()
17341 env->cur_hist_ent = NULL; in do_check()
17343 env->prev_insn_idx = prev_insn_idx; in do_check()
17344 if (env->insn_idx >= insn_cnt) { in do_check()
17346 env->insn_idx, insn_cnt); in do_check()
17347 return -EFAULT; in do_check()
17350 insn = &insns[env->insn_idx]; in do_check()
17351 class = BPF_CLASS(insn->code); in do_check()
17353 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { in do_check()
17356 env->insn_processed); in do_check()
17357 return -E2BIG; in do_check()
17360 state->last_insn_idx = env->prev_insn_idx; in do_check()
17362 if (is_prune_point(env, env->insn_idx)) { in do_check()
17363 err = is_state_visited(env, env->insn_idx); in do_check()
17368 if (env->log.level & BPF_LOG_LEVEL) { in do_check()
17371 env->prev_insn_idx, env->insn_idx, in do_check()
17372 env->cur_state->speculative ? in do_check()
17375 verbose(env, "%d: safe\n", env->insn_idx); in do_check()
17381 if (is_jmp_point(env, env->insn_idx)) { in do_check()
17388 return -EAGAIN; in do_check()
17393 if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) { in do_check()
17395 env->prev_insn_idx, env->insn_idx, in do_check()
17396 env->cur_state->speculative ? in do_check()
17398 print_verifier_state(env, state->frame[state->curframe], true); in do_check()
17402 if (env->log.level & BPF_LOG_LEVEL) { in do_check()
17410 print_insn_state(env, state->frame[state->curframe]); in do_check()
17412 verbose_linfo(env, env->insn_idx, "; "); in do_check()
17413 env->prev_log_pos = env->log.end_pos; in do_check()
17414 verbose(env, "%d: ", env->insn_idx); in do_check()
17415 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); in do_check()
17416 env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos; in do_check()
17417 env->prev_log_pos = env->log.end_pos; in do_check()
17420 if (bpf_prog_is_offloaded(env->prog->aux)) { in do_check()
17421 err = bpf_prog_offload_verify_insn(env, env->insn_idx, in do_check()
17422 env->prev_insn_idx); in do_check()
17429 prev_insn_idx = env->insn_idx; in do_check()
17442 err = check_reg_arg(env, insn->src_reg, SRC_OP); in do_check()
17446 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in do_check()
17450 src_reg_type = regs[insn->src_reg].type; in do_check()
17455 err = check_mem_access(env, env->insn_idx, insn->src_reg, in do_check()
17456 insn->off, BPF_SIZE(insn->code), in do_check()
17457 BPF_READ, insn->dst_reg, false, in do_check()
17458 BPF_MODE(insn->code) == BPF_MEMSX); in do_check()
17460 err = err ?: reg_bounds_sanity_check(env, &regs[insn->dst_reg], "ldx"); in do_check()
17466 if (BPF_MODE(insn->code) == BPF_ATOMIC) { in do_check()
17467 err = check_atomic(env, env->insn_idx, insn); in do_check()
17470 env->insn_idx++; in do_check()
17474 if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) { in do_check()
17476 return -EINVAL; in do_check()
17480 err = check_reg_arg(env, insn->src_reg, SRC_OP); in do_check()
17484 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in do_check()
17488 dst_reg_type = regs[insn->dst_reg].type; in do_check()
17491 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in do_check()
17492 insn->off, BPF_SIZE(insn->code), in do_check()
17493 BPF_WRITE, insn->src_reg, false, false); in do_check()
17503 if (BPF_MODE(insn->code) != BPF_MEM || in do_check()
17504 insn->src_reg != BPF_REG_0) { in do_check()
17506 return -EINVAL; in do_check()
17509 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in do_check()
17513 dst_reg_type = regs[insn->dst_reg].type; in do_check()
17516 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in do_check()
17517 insn->off, BPF_SIZE(insn->code), in do_check()
17518 BPF_WRITE, -1, false, false); in do_check()
17526 u8 opcode = BPF_OP(insn->code); in do_check()
17528 env->jmps_processed++; in do_check()
17530 if (BPF_SRC(insn->code) != BPF_K || in do_check()
17531 (insn->src_reg != BPF_PSEUDO_KFUNC_CALL in do_check()
17532 && insn->off != 0) || in do_check()
17533 (insn->src_reg != BPF_REG_0 && in do_check()
17534 insn->src_reg != BPF_PSEUDO_CALL && in do_check()
17535 insn->src_reg != BPF_PSEUDO_KFUNC_CALL) || in do_check()
17536 insn->dst_reg != BPF_REG_0 || in do_check()
17539 return -EINVAL; in do_check()
17542 if (env->cur_state->active_lock.ptr) { in do_check()
17543 if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) || in do_check()
17544 (insn->src_reg == BPF_PSEUDO_CALL) || in do_check()
17545 (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && in do_check()
17546 (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) { in do_check()
17548 return -EINVAL; in do_check()
17551 if (insn->src_reg == BPF_PSEUDO_CALL) { in do_check()
17552 err = check_func_call(env, insn, &env->insn_idx); in do_check()
17553 } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { in do_check()
17554 err = check_kfunc_call(env, insn, &env->insn_idx); in do_check()
17560 err = check_helper_call(env, insn, &env->insn_idx); in do_check()
17567 if (BPF_SRC(insn->code) != BPF_K || in do_check()
17568 insn->src_reg != BPF_REG_0 || in do_check()
17569 insn->dst_reg != BPF_REG_0 || in do_check()
17570 (class == BPF_JMP && insn->imm != 0) || in do_check()
17571 (class == BPF_JMP32 && insn->off != 0)) { in do_check()
17573 return -EINVAL; in do_check()
17577 env->insn_idx += insn->off + 1; in do_check()
17579 env->insn_idx += insn->imm + 1; in do_check()
17583 if (BPF_SRC(insn->code) != BPF_K || in do_check()
17584 insn->imm != 0 || in do_check()
17585 insn->src_reg != BPF_REG_0 || in do_check()
17586 insn->dst_reg != BPF_REG_0 || in do_check()
17589 return -EINVAL; in do_check()
17592 if (env->cur_state->active_lock.ptr && in do_check()
17595 return -EINVAL; in do_check()
17598 if (env->cur_state->active_rcu_lock && in do_check()
17601 return -EINVAL; in do_check()
17606 * state->curframe > 0, it may be a callback in do_check()
17627 if (state->curframe) { in do_check()
17629 err = prepare_func_exit(env, &env->insn_idx); in do_check()
17641 update_branch_counts(env, env->cur_state); in do_check()
17643 &env->insn_idx, pop_log); in do_check()
17645 if (err != -ENOENT) in do_check()
17653 err = check_cond_jmp_op(env, insn, &env->insn_idx); in do_check()
17658 u8 mode = BPF_MODE(insn->code); in do_check()
17670 env->insn_idx++; in do_check()
17674 return -EINVAL; in do_check()
17678 return -EINVAL; in do_check()
17681 env->insn_idx++; in do_check()
17706 if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC) in find_btf_percpu_datasec()
17709 tname = btf_name_by_offset(btf, t->name_off); in find_btf_percpu_datasec()
17714 return -ENOENT; in find_btf_percpu_datasec()
17728 u32 type, id = insn->imm; in check_pseudo_btf_id()
17739 return -EINVAL; in check_pseudo_btf_id()
17744 return -EINVAL; in check_pseudo_btf_id()
17753 err = -ENOENT; in check_pseudo_btf_id()
17759 err = -EINVAL; in check_pseudo_btf_id()
17763 sym_name = btf_name_by_offset(btf, t->name_off); in check_pseudo_btf_id()
17768 err = -ENOENT; in check_pseudo_btf_id()
17775 aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; in check_pseudo_btf_id()
17776 aux->btf_var.mem_size = 0; in check_pseudo_btf_id()
17784 if (vsi->type == id) { in check_pseudo_btf_id()
17791 type = t->type; in check_pseudo_btf_id()
17794 aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU; in check_pseudo_btf_id()
17795 aux->btf_var.btf = btf; in check_pseudo_btf_id()
17796 aux->btf_var.btf_id = type; in check_pseudo_btf_id()
17805 tname = btf_name_by_offset(btf, t->name_off); in check_pseudo_btf_id()
17808 err = -EINVAL; in check_pseudo_btf_id()
17811 aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; in check_pseudo_btf_id()
17812 aux->btf_var.mem_size = tsize; in check_pseudo_btf_id()
17814 aux->btf_var.reg_type = PTR_TO_BTF_ID; in check_pseudo_btf_id()
17815 aux->btf_var.btf = btf; in check_pseudo_btf_id()
17816 aux->btf_var.btf_id = type; in check_pseudo_btf_id()
17820 for (i = 0; i < env->used_btf_cnt; i++) { in check_pseudo_btf_id()
17821 if (env->used_btfs[i].btf == btf) { in check_pseudo_btf_id()
17827 if (env->used_btf_cnt >= MAX_USED_BTFS) { in check_pseudo_btf_id()
17828 err = -E2BIG; in check_pseudo_btf_id()
17832 btf_mod = &env->used_btfs[env->used_btf_cnt]; in check_pseudo_btf_id()
17833 btf_mod->btf = btf; in check_pseudo_btf_id()
17834 btf_mod->module = NULL; in check_pseudo_btf_id()
17838 btf_mod->module = btf_try_get_module(btf); in check_pseudo_btf_id()
17839 if (!btf_mod->module) { in check_pseudo_btf_id()
17840 err = -ENXIO; in check_pseudo_btf_id()
17845 env->used_btf_cnt++; in check_pseudo_btf_id()
17874 if (btf_record_has_field(map->record, BPF_LIST_HEAD) || in check_map_prog_compatibility()
17875 btf_record_has_field(map->record, BPF_RB_ROOT)) { in check_map_prog_compatibility()
17878 return -EINVAL; in check_map_prog_compatibility()
17882 if (btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in check_map_prog_compatibility()
17885 return -EINVAL; in check_map_prog_compatibility()
17890 return -EINVAL; in check_map_prog_compatibility()
17894 if (btf_record_has_field(map->record, BPF_TIMER)) { in check_map_prog_compatibility()
17897 return -EINVAL; in check_map_prog_compatibility()
17901 if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) && in check_map_prog_compatibility()
17904 return -EINVAL; in check_map_prog_compatibility()
17907 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in check_map_prog_compatibility()
17909 return -EINVAL; in check_map_prog_compatibility()
17912 if (prog->aux->sleepable) in check_map_prog_compatibility()
17913 switch (map->map_type) { in check_map_prog_compatibility()
17932 return -EINVAL; in check_map_prog_compatibility()
17940 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || in bpf_map_is_cgroup_storage()
17941 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); in bpf_map_is_cgroup_storage()
17953 struct bpf_insn *insn = env->prog->insnsi; in resolve_pseudo_ldimm64()
17954 int insn_cnt = env->prog->len; in resolve_pseudo_ldimm64()
17957 err = bpf_prog_calc_tag(env->prog); in resolve_pseudo_ldimm64()
17962 if (BPF_CLASS(insn->code) == BPF_LDX && in resolve_pseudo_ldimm64()
17963 ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) || in resolve_pseudo_ldimm64()
17964 insn->imm != 0)) { in resolve_pseudo_ldimm64()
17966 return -EINVAL; in resolve_pseudo_ldimm64()
17976 if (i == insn_cnt - 1 || insn[1].code != 0 || in resolve_pseudo_ldimm64()
17980 return -EINVAL; in resolve_pseudo_ldimm64()
17984 /* valid generic load 64-bit imm */ in resolve_pseudo_ldimm64()
17988 aux = &env->insn_aux_data[i]; in resolve_pseudo_ldimm64()
17996 aux = &env->insn_aux_data[i]; in resolve_pseudo_ldimm64()
17997 aux->ptr_type = PTR_TO_FUNC; in resolve_pseudo_ldimm64()
18002 * converted into regular 64-bit imm load insn. in resolve_pseudo_ldimm64()
18015 return -EINVAL; in resolve_pseudo_ldimm64()
18021 if (bpfptr_is_null(env->fd_array)) { in resolve_pseudo_ldimm64()
18023 return -EPROTO; in resolve_pseudo_ldimm64()
18025 if (copy_from_bpfptr_offset(&fd, env->fd_array, in resolve_pseudo_ldimm64()
18028 return -EFAULT; in resolve_pseudo_ldimm64()
18043 err = check_map_prog_compatibility(env, map, env->prog); in resolve_pseudo_ldimm64()
18049 aux = &env->insn_aux_data[i]; in resolve_pseudo_ldimm64()
18059 return -EINVAL; in resolve_pseudo_ldimm64()
18062 if (!map->ops->map_direct_value_addr) { in resolve_pseudo_ldimm64()
18065 return -EINVAL; in resolve_pseudo_ldimm64()
18068 err = map->ops->map_direct_value_addr(map, &addr, off); in resolve_pseudo_ldimm64()
18071 map->value_size, off); in resolve_pseudo_ldimm64()
18076 aux->map_off = off; in resolve_pseudo_ldimm64()
18084 for (j = 0; j < env->used_map_cnt; j++) { in resolve_pseudo_ldimm64()
18085 if (env->used_maps[j] == map) { in resolve_pseudo_ldimm64()
18086 aux->map_index = j; in resolve_pseudo_ldimm64()
18092 if (env->used_map_cnt >= MAX_USED_MAPS) { in resolve_pseudo_ldimm64()
18094 return -E2BIG; in resolve_pseudo_ldimm64()
18097 if (env->prog->aux->sleepable) in resolve_pseudo_ldimm64()
18098 atomic64_inc(&map->sleepable_refcnt); in resolve_pseudo_ldimm64()
18106 aux->map_index = env->used_map_cnt; in resolve_pseudo_ldimm64()
18107 env->used_maps[env->used_map_cnt++] = map; in resolve_pseudo_ldimm64()
18110 bpf_cgroup_storage_assign(env->prog->aux, map)) { in resolve_pseudo_ldimm64()
18113 return -EBUSY; in resolve_pseudo_ldimm64()
18124 if (!bpf_opcode_in_insntable(insn->code)) { in resolve_pseudo_ldimm64()
18125 verbose(env, "unknown opcode %02x\n", insn->code); in resolve_pseudo_ldimm64()
18126 return -EINVAL; in resolve_pseudo_ldimm64()
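/*
 * Hedged illustration for the direct-value branch above: a BPF C global
 * variable compiles to a two-insn ld_imm64 with src_reg == BPF_PSEUDO_MAP_VALUE
 * (map fd plus an offset into the object's .data map), which
 * resolve_pseudo_ldimm64() resolves through map->ops->map_direct_value_addr().
 * Names are invented for the example.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

int pkt_count = 0;			/* lives in the per-object .data array map */

SEC("tc")
int count_pkts(struct __sk_buff *skb)
{
	__sync_fetch_and_add(&pkt_count, 1);	/* direct map-value access */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";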
18140 __bpf_free_used_maps(env->prog->aux, env->used_maps, in release_maps()
18141 env->used_map_cnt); in release_maps()
18147 __bpf_free_used_btfs(env->prog->aux, env->used_btfs, in release_btfs()
18148 env->used_btf_cnt); in release_btfs()
18154 struct bpf_insn *insn = env->prog->insnsi; in convert_pseudo_ld_imm64()
18155 int insn_cnt = env->prog->len; in convert_pseudo_ld_imm64()
18159 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) in convert_pseudo_ld_imm64()
18161 if (insn->src_reg == BPF_PSEUDO_FUNC) in convert_pseudo_ld_imm64()
18163 insn->src_reg = 0; in convert_pseudo_ld_imm64()
18167 /* single env->prog->insnsi[off] instruction was replaced with the range
18175 struct bpf_insn_aux_data *old_data = env->insn_aux_data; in adjust_insn_aux_data()
18176 struct bpf_insn *insn = new_prog->insnsi; in adjust_insn_aux_data()
18185 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); in adjust_insn_aux_data()
18189 prog_len = new_prog->len; in adjust_insn_aux_data()
18192 memcpy(new_data + off + cnt - 1, old_data + off, in adjust_insn_aux_data()
18193 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); in adjust_insn_aux_data()
18194 for (i = off; i < off + cnt - 1; i++) { in adjust_insn_aux_data()
18199 env->insn_aux_data = new_data; in adjust_insn_aux_data()
18210 for (i = 0; i <= env->subprog_cnt; i++) { in adjust_subprog_starts()
18211 if (env->subprog_info[i].start <= off) in adjust_subprog_starts()
18213 env->subprog_info[i].start += len - 1; in adjust_subprog_starts()
18219 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; in adjust_poke_descs()
18220 int i, sz = prog->aux->size_poke_tab; in adjust_poke_descs()
18225 if (desc->insn_idx <= off) in adjust_poke_descs()
18227 desc->insn_idx += len - 1; in adjust_poke_descs()
18238 new_data = vzalloc(array_size(env->prog->len + len - 1, in bpf_patch_insn_data()
18244 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); in bpf_patch_insn_data()
18246 if (PTR_ERR(new_prog) == -ERANGE) in bpf_patch_insn_data()
18248 "insn %d cannot be patched due to 16-bit range\n", in bpf_patch_insn_data()
18249 env->insn_aux_data[off].orig_idx); in bpf_patch_insn_data()
18265 for (i = 0; i < env->subprog_cnt; i++) in adjust_subprog_starts_after_remove()
18266 if (env->subprog_info[i].start >= off) in adjust_subprog_starts_after_remove()
18269 for (j = i; j < env->subprog_cnt; j++) in adjust_subprog_starts_after_remove()
18270 if (env->subprog_info[j].start >= off + cnt) in adjust_subprog_starts_after_remove()
18275 if (env->subprog_info[j].start != off + cnt) in adjust_subprog_starts_after_remove()
18276 j--; in adjust_subprog_starts_after_remove()
18279 struct bpf_prog_aux *aux = env->prog->aux; in adjust_subprog_starts_after_remove()
18283 move = env->subprog_cnt + 1 - j; in adjust_subprog_starts_after_remove()
18285 memmove(env->subprog_info + i, in adjust_subprog_starts_after_remove()
18286 env->subprog_info + j, in adjust_subprog_starts_after_remove()
18287 sizeof(*env->subprog_info) * move); in adjust_subprog_starts_after_remove()
18288 env->subprog_cnt -= j - i; in adjust_subprog_starts_after_remove()
18291 if (aux->func_info) { in adjust_subprog_starts_after_remove()
18292 move = aux->func_info_cnt - j; in adjust_subprog_starts_after_remove()
18294 memmove(aux->func_info + i, in adjust_subprog_starts_after_remove()
18295 aux->func_info + j, in adjust_subprog_starts_after_remove()
18296 sizeof(*aux->func_info) * move); in adjust_subprog_starts_after_remove()
18297 aux->func_info_cnt -= j - i; in adjust_subprog_starts_after_remove()
18298 /* func_info->insn_off is set after all code rewrites, in adjust_subprog_starts_after_remove()
18299 * in adjust_btf_func() - no need to adjust in adjust_subprog_starts_after_remove()
18304 if (env->subprog_info[i].start == off) in adjust_subprog_starts_after_remove()
18309 for (; i <= env->subprog_cnt; i++) in adjust_subprog_starts_after_remove()
18310 env->subprog_info[i].start -= cnt; in adjust_subprog_starts_after_remove()
18318 struct bpf_prog *prog = env->prog; in bpf_adj_linfo_after_remove()
18322 nr_linfo = prog->aux->nr_linfo; in bpf_adj_linfo_after_remove()
18326 linfo = prog->aux->linfo; in bpf_adj_linfo_after_remove()
18342 * last removed linfo. prog is already modified, so prog->len == off in bpf_adj_linfo_after_remove()
18345 if (prog->len != off && l_cnt && in bpf_adj_linfo_after_remove()
18347 l_cnt--; in bpf_adj_linfo_after_remove()
18348 linfo[--i].insn_off = off + cnt; in bpf_adj_linfo_after_remove()
18354 sizeof(*linfo) * (nr_linfo - i)); in bpf_adj_linfo_after_remove()
18356 prog->aux->nr_linfo -= l_cnt; in bpf_adj_linfo_after_remove()
18357 nr_linfo = prog->aux->nr_linfo; in bpf_adj_linfo_after_remove()
18362 linfo[i].insn_off -= cnt; in bpf_adj_linfo_after_remove()
18365 for (i = 0; i <= env->subprog_cnt; i++) in bpf_adj_linfo_after_remove()
18366 if (env->subprog_info[i].linfo_idx > l_off) { in bpf_adj_linfo_after_remove()
18370 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) in bpf_adj_linfo_after_remove()
18371 env->subprog_info[i].linfo_idx -= l_cnt; in bpf_adj_linfo_after_remove()
18373 env->subprog_info[i].linfo_idx = l_off; in bpf_adj_linfo_after_remove()
18381 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in verifier_remove_insns()
18382 unsigned int orig_prog_len = env->prog->len; in verifier_remove_insns()
18385 if (bpf_prog_is_offloaded(env->prog->aux)) in verifier_remove_insns()
18388 err = bpf_remove_insns(env->prog, off, cnt); in verifier_remove_insns()
18401 sizeof(*aux_data) * (orig_prog_len - off - cnt)); in verifier_remove_insns()
18408 * have dead code too. Therefore replace all dead at-run-time code
18409 * with 'ja -1'.
18419 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in sanitize_dead_code()
18420 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); in sanitize_dead_code()
18421 struct bpf_insn *insn = env->prog->insnsi; in sanitize_dead_code()
18422 const int insn_cnt = env->prog->len; in sanitize_dead_code()
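/*
 * Hedged before/after sketch of the 'ja -1' sanitization described above,
 * written with the instruction macros from include/linux/filter.h.  Register
 * and offset choices are illustrative.
 */
static const struct bpf_insn dead_code_before[] = {
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),		/* verifier proved this branch always taken */
	BPF_MOV64_IMM(BPF_REG_0, 1),			/* never explored -> dead */
	BPF_EXIT_INSN(),				/* never explored -> dead */
};

static const struct bpf_insn dead_code_after[] = {
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
	BPF_JMP_IMM(BPF_JA, 0, 0, -1),			/* dead insn now traps in a self-jump */
	BPF_JMP_IMM(BPF_JA, 0, 0, -1),
};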
18449 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in opt_hard_wire_dead_code_branches()
18451 struct bpf_insn *insn = env->prog->insnsi; in opt_hard_wire_dead_code_branches()
18452 const int insn_cnt = env->prog->len; in opt_hard_wire_dead_code_branches()
18456 if (!insn_is_cond_jump(insn->code)) in opt_hard_wire_dead_code_branches()
18460 ja.off = insn->off; in opt_hard_wire_dead_code_branches()
18461 else if (!aux_data[i + 1 + insn->off].seen) in opt_hard_wire_dead_code_branches()
18466 if (bpf_prog_is_offloaded(env->prog->aux)) in opt_hard_wire_dead_code_branches()
18475 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in opt_remove_dead_code()
18476 int insn_cnt = env->prog->len; in opt_remove_dead_code()
18491 insn_cnt = env->prog->len; in opt_remove_dead_code()
18500 struct bpf_insn *insn = env->prog->insnsi; in opt_remove_nops()
18501 int insn_cnt = env->prog->len; in opt_remove_nops()
18511 insn_cnt--; in opt_remove_nops()
18512 i--; in opt_remove_nops()
18522 struct bpf_insn_aux_data *aux = env->insn_aux_data; in opt_subreg_zext_lo32_rnd_hi32()
18523 int i, patch_len, delta = 0, len = env->prog->len; in opt_subreg_zext_lo32_rnd_hi32()
18524 struct bpf_insn *insns = env->prog->insnsi; in opt_subreg_zext_lo32_rnd_hi32()
18528 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; in opt_subreg_zext_lo32_rnd_hi32()
18549 if (load_reg == -1) in opt_subreg_zext_lo32_rnd_hi32()
18577 		/* Add in a zero-extend instruction if a) the JIT has requested in opt_subreg_zext_lo32_rnd_hi32()
18581 * R0, therefore always zero-extends. However some archs' in opt_subreg_zext_lo32_rnd_hi32()
18584 * orthogonal to the general zero-extension behaviour of the in opt_subreg_zext_lo32_rnd_hi32()
18590 /* Zero-extension is done by the caller. */ in opt_subreg_zext_lo32_rnd_hi32()
18594 if (WARN_ON(load_reg == -1)) { in opt_subreg_zext_lo32_rnd_hi32()
18596 return -EFAULT; in opt_subreg_zext_lo32_rnd_hi32()
18607 return -ENOMEM; in opt_subreg_zext_lo32_rnd_hi32()
18608 env->prog = new_prog; in opt_subreg_zext_lo32_rnd_hi32()
18609 insns = new_prog->insnsi; in opt_subreg_zext_lo32_rnd_hi32()
18610 aux = env->insn_aux_data; in opt_subreg_zext_lo32_rnd_hi32()
18611 delta += patch_len - 1; in opt_subreg_zext_lo32_rnd_hi32()
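/*
 * Hedged sketch of the zero-extension patch applied above: after an
 * instruction that defines only a 32-bit sub-register, the verifier can
 * append BPF_ZEXT_REG() (the special mov32 dst,dst form from
 * include/linux/filter.h) for JITs that do not implicitly clear the upper
 * 32 bits.  Register choice is illustrative.
 */
static const struct bpf_insn zext_patch_example[] = {
	BPF_ALU32_IMM(BPF_ADD, BPF_REG_6, 1),	/* defines only the low 32 bits of r6 */
	BPF_ZEXT_REG(BPF_REG_6),		/* inserted: r6 = (u32)r6 */
};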
18619 * struct __sk_buff -> struct sk_buff
18620 * struct bpf_sock_ops -> struct sock
18624 const struct bpf_verifier_ops *ops = env->ops; in convert_ctx_accesses()
18626 const int insn_cnt = env->prog->len; in convert_ctx_accesses()
18633 if (ops->gen_prologue || env->seen_direct_write) { in convert_ctx_accesses()
18634 if (!ops->gen_prologue) { in convert_ctx_accesses()
18636 return -EINVAL; in convert_ctx_accesses()
18638 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, in convert_ctx_accesses()
18639 env->prog); in convert_ctx_accesses()
18642 return -EINVAL; in convert_ctx_accesses()
18646 return -ENOMEM; in convert_ctx_accesses()
18648 env->prog = new_prog; in convert_ctx_accesses()
18649 delta += cnt - 1; in convert_ctx_accesses()
18653 if (bpf_prog_is_offloaded(env->prog->aux)) in convert_ctx_accesses()
18656 insn = env->prog->insnsi + delta; in convert_ctx_accesses()
18662 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || in convert_ctx_accesses()
18663 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || in convert_ctx_accesses()
18664 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || in convert_ctx_accesses()
18665 insn->code == (BPF_LDX | BPF_MEM | BPF_DW) || in convert_ctx_accesses()
18666 insn->code == (BPF_LDX | BPF_MEMSX | BPF_B) || in convert_ctx_accesses()
18667 insn->code == (BPF_LDX | BPF_MEMSX | BPF_H) || in convert_ctx_accesses()
18668 insn->code == (BPF_LDX | BPF_MEMSX | BPF_W)) { in convert_ctx_accesses()
18670 } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || in convert_ctx_accesses()
18671 insn->code == (BPF_STX | BPF_MEM | BPF_H) || in convert_ctx_accesses()
18672 insn->code == (BPF_STX | BPF_MEM | BPF_W) || in convert_ctx_accesses()
18673 insn->code == (BPF_STX | BPF_MEM | BPF_DW) || in convert_ctx_accesses()
18674 insn->code == (BPF_ST | BPF_MEM | BPF_B) || in convert_ctx_accesses()
18675 insn->code == (BPF_ST | BPF_MEM | BPF_H) || in convert_ctx_accesses()
18676 insn->code == (BPF_ST | BPF_MEM | BPF_W) || in convert_ctx_accesses()
18677 insn->code == (BPF_ST | BPF_MEM | BPF_DW)) { in convert_ctx_accesses()
18684 env->insn_aux_data[i + delta].sanitize_stack_spill) { in convert_ctx_accesses()
18693 return -ENOMEM; in convert_ctx_accesses()
18695 delta += cnt - 1; in convert_ctx_accesses()
18696 env->prog = new_prog; in convert_ctx_accesses()
18697 insn = new_prog->insnsi + i + delta; in convert_ctx_accesses()
18701 switch ((int)env->insn_aux_data[i + delta].ptr_type) { in convert_ctx_accesses()
18703 if (!ops->convert_ctx_access) in convert_ctx_accesses()
18705 convert_ctx_access = ops->convert_ctx_access; in convert_ctx_accesses()
18727 if (BPF_MODE(insn->code) == BPF_MEM) in convert_ctx_accesses()
18728 insn->code = BPF_LDX | BPF_PROBE_MEM | in convert_ctx_accesses()
18729 BPF_SIZE((insn)->code); in convert_ctx_accesses()
18731 insn->code = BPF_LDX | BPF_PROBE_MEMSX | in convert_ctx_accesses()
18732 BPF_SIZE((insn)->code); in convert_ctx_accesses()
18733 env->prog->aux->num_exentries++; in convert_ctx_accesses()
18740 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; in convert_ctx_accesses()
18742 mode = BPF_MODE(insn->code); in convert_ctx_accesses()
18745 		 * convert to a 4/8-byte load, to minimize program type specific in convert_ctx_accesses()
18751 off = insn->off; in convert_ctx_accesses()
18757 return -EINVAL; in convert_ctx_accesses()
18766 insn->off = off & ~(size_default - 1); in convert_ctx_accesses()
18767 insn->code = BPF_LDX | BPF_MEM | size_code; in convert_ctx_accesses()
18771 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, in convert_ctx_accesses()
18776 return -EINVAL; in convert_ctx_accesses()
18784 return -EINVAL; in convert_ctx_accesses()
18789 insn->dst_reg, in convert_ctx_accesses()
18791 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, in convert_ctx_accesses()
18792 (1 << size * 8) - 1); in convert_ctx_accesses()
18796 insn->dst_reg, in convert_ctx_accesses()
18798 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, in convert_ctx_accesses()
18799 (1ULL << size * 8) - 1); in convert_ctx_accesses()
18804 insn->dst_reg, insn->dst_reg, in convert_ctx_accesses()
18809 return -ENOMEM; in convert_ctx_accesses()
18811 delta += cnt - 1; in convert_ctx_accesses()
18814 env->prog = new_prog; in convert_ctx_accesses()
18815 insn = new_prog->insnsi + i + delta; in convert_ctx_accesses()
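/*
 * Hedged sketch of a single context rewrite performed above: a socket
 * filter's load of __sk_buff::len is replaced with a load from the real
 * struct sk_buff.  The actual instructions come from the program type's
 * convert_ctx_access callback (bpf_convert_ctx_access() in net/core/filter.c
 * for this case); the single-instruction form below is the common outcome,
 * not a guarantee.
 */
static const struct bpf_insn ctx_rewrite_example[] = {
	/* before: r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len)) */
	/* after: */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct sk_buff, len)),
};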
18823 struct bpf_prog *prog = env->prog, **func, *tmp; in jit_subprogs()
18830 if (env->subprog_cnt <= 1) in jit_subprogs()
18833 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
18838 * need a hard reject of the program. Thus -EFAULT is in jit_subprogs()
18841 subprog = find_subprog(env, i + insn->imm + 1); in jit_subprogs()
18844 i + insn->imm + 1); in jit_subprogs()
18845 return -EFAULT; in jit_subprogs()
18850 insn->off = subprog; in jit_subprogs()
18854 env->insn_aux_data[i].call_imm = insn->imm; in jit_subprogs()
18856 insn->imm = 1; in jit_subprogs()
18869 err = -ENOMEM; in jit_subprogs()
18870 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); in jit_subprogs()
18874 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
18876 subprog_end = env->subprog_info[i + 1].start; in jit_subprogs()
18878 len = subprog_end - subprog_start; in jit_subprogs()
18882 * func[i]->stats will never be accessed and stays NULL in jit_subprogs()
18887 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], in jit_subprogs()
18889 func[i]->type = prog->type; in jit_subprogs()
18890 func[i]->len = len; in jit_subprogs()
18893 func[i]->is_func = 1; in jit_subprogs()
18894 func[i]->aux->func_idx = i; in jit_subprogs()
18895 		/* The members below will be freed only at prog->aux */ in jit_subprogs()
18896 func[i]->aux->btf = prog->aux->btf; in jit_subprogs()
18897 func[i]->aux->func_info = prog->aux->func_info; in jit_subprogs()
18898 func[i]->aux->func_info_cnt = prog->aux->func_info_cnt; in jit_subprogs()
18899 func[i]->aux->poke_tab = prog->aux->poke_tab; in jit_subprogs()
18900 func[i]->aux->size_poke_tab = prog->aux->size_poke_tab; in jit_subprogs()
18902 for (j = 0; j < prog->aux->size_poke_tab; j++) { in jit_subprogs()
18905 poke = &prog->aux->poke_tab[j]; in jit_subprogs()
18906 if (poke->insn_idx < subprog_end && in jit_subprogs()
18907 poke->insn_idx >= subprog_start) in jit_subprogs()
18908 poke->aux = func[i]->aux; in jit_subprogs()
18911 func[i]->aux->name[0] = 'F'; in jit_subprogs()
18912 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; in jit_subprogs()
18913 func[i]->jit_requested = 1; in jit_subprogs()
18914 func[i]->blinding_requested = prog->blinding_requested; in jit_subprogs()
18915 func[i]->aux->kfunc_tab = prog->aux->kfunc_tab; in jit_subprogs()
18916 func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab; in jit_subprogs()
18917 func[i]->aux->linfo = prog->aux->linfo; in jit_subprogs()
18918 func[i]->aux->nr_linfo = prog->aux->nr_linfo; in jit_subprogs()
18919 func[i]->aux->jited_linfo = prog->aux->jited_linfo; in jit_subprogs()
18920 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; in jit_subprogs()
18922 insn = func[i]->insnsi; in jit_subprogs()
18923 for (j = 0; j < func[i]->len; j++, insn++) { in jit_subprogs()
18924 if (BPF_CLASS(insn->code) == BPF_LDX && in jit_subprogs()
18925 (BPF_MODE(insn->code) == BPF_PROBE_MEM || in jit_subprogs()
18926 BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) in jit_subprogs()
18929 func[i]->aux->num_exentries = num_exentries; in jit_subprogs()
18930 func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable; in jit_subprogs()
18931 func[i]->aux->exception_cb = env->subprog_info[i].is_exception_cb; in jit_subprogs()
18933 func[i]->aux->exception_boundary = env->seen_exception; in jit_subprogs()
18935 if (!func[i]->jited) { in jit_subprogs()
18936 err = -ENOTSUPP; in jit_subprogs()
18946 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
18947 insn = func[i]->insnsi; in jit_subprogs()
18948 for (j = 0; j < func[i]->len; j++, insn++) { in jit_subprogs()
18950 subprog = insn->off; in jit_subprogs()
18951 insn[0].imm = (u32)(long)func[subprog]->bpf_func; in jit_subprogs()
18952 insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32; in jit_subprogs()
18957 subprog = insn->off; in jit_subprogs()
18958 insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func); in jit_subprogs()
18972 func[i]->aux->func = func; in jit_subprogs()
18973 func[i]->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt; in jit_subprogs()
18974 func[i]->aux->real_func_cnt = env->subprog_cnt; in jit_subprogs()
18976 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
18977 old_bpf_func = func[i]->bpf_func; in jit_subprogs()
18979 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { in jit_subprogs()
18980 verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); in jit_subprogs()
18981 err = -ENOTSUPP; in jit_subprogs()
18991 for (i = 1; i < env->subprog_cnt; i++) { in jit_subprogs()
19000 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
19002 insn[0].imm = env->insn_aux_data[i].call_imm; in jit_subprogs()
19003 insn[1].imm = insn->off; in jit_subprogs()
19004 insn->off = 0; in jit_subprogs()
19009 insn->off = env->insn_aux_data[i].call_imm; in jit_subprogs()
19010 subprog = find_subprog(env, i + insn->off + 1); in jit_subprogs()
19011 insn->imm = subprog; in jit_subprogs()
19014 prog->jited = 1; in jit_subprogs()
19015 prog->bpf_func = func[0]->bpf_func; in jit_subprogs()
19016 prog->jited_len = func[0]->jited_len; in jit_subprogs()
19017 prog->aux->extable = func[0]->aux->extable; in jit_subprogs()
19018 prog->aux->num_exentries = func[0]->aux->num_exentries; in jit_subprogs()
19019 prog->aux->func = func; in jit_subprogs()
19020 prog->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt; in jit_subprogs()
19021 prog->aux->real_func_cnt = env->subprog_cnt; in jit_subprogs()
19022 prog->aux->bpf_exception_cb = (void *)func[env->exception_callback_subprog]->bpf_func; in jit_subprogs()
19023 prog->aux->exception_boundary = func[0]->aux->exception_boundary; in jit_subprogs()
19031 for (i = 0; i < prog->aux->size_poke_tab; i++) { in jit_subprogs()
19032 map_ptr = prog->aux->poke_tab[i].tail_call.map; in jit_subprogs()
19033 map_ptr->ops->map_poke_untrack(map_ptr, prog->aux); in jit_subprogs()
19039 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
19042 func[i]->aux->poke_tab = NULL; in jit_subprogs()
19048 prog->jit_requested = 0; in jit_subprogs()
19049 prog->blinding_requested = 0; in jit_subprogs()
19050 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
19053 insn->off = 0; in jit_subprogs()
19054 insn->imm = env->insn_aux_data[i].call_imm; in jit_subprogs()
19063 struct bpf_prog *prog = env->prog; in fixup_call_args()
19064 struct bpf_insn *insn = prog->insnsi; in fixup_call_args()
19070 if (env->prog->jit_requested && in fixup_call_args()
19071 !bpf_prog_is_offloaded(env->prog->aux)) { in fixup_call_args()
19075 if (err == -EFAULT) in fixup_call_args()
19080 			verbose(env, "calling kernel functions is not allowed in non-JITed programs\n"); in fixup_call_args()
19081 return -EINVAL; in fixup_call_args()
19083 if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) { in fixup_call_args()
19087 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); in fixup_call_args()
19088 return -EINVAL; in fixup_call_args()
19090 for (i = 0; i < prog->len; i++, insn++) { in fixup_call_args()
19095 verbose(env, "callbacks are not allowed in non-JITed programs\n"); in fixup_call_args()
19096 return -EINVAL; in fixup_call_args()
19115 struct bpf_prog *prog = env->prog; in specialize_kfunc()
19133 seen_direct_write = env->seen_direct_write; in specialize_kfunc()
19139 /* restore env->seen_direct_write to its original value, since in specialize_kfunc()
19142 env->seen_direct_write = seen_direct_write; in specialize_kfunc()
19153 struct btf_struct_meta *kptr_struct_meta = insn_aux->kptr_struct_meta; in __fixup_collection_insert_kfunc()
19158 insn_buf[2] = BPF_MOV64_IMM(node_offset_reg, insn_aux->insert_off); in __fixup_collection_insert_kfunc()
19168 if (!insn->imm) { in fixup_kfunc_call()
19170 return -EINVAL; in fixup_kfunc_call()
19175 /* insn->imm has the btf func_id. Replace it with an offset relative to in fixup_kfunc_call()
19179 desc = find_kfunc_desc(env->prog, insn->imm, insn->off); in fixup_kfunc_call()
19182 insn->imm); in fixup_kfunc_call()
19183 return -EFAULT; in fixup_kfunc_call()
19187 insn->imm = BPF_CALL_IMM(desc->addr); in fixup_kfunc_call()
19188 if (insn->off) in fixup_kfunc_call()
19190 if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] || in fixup_kfunc_call()
19191 desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { in fixup_kfunc_call()
19192 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; in fixup_kfunc_call()
19194 u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size; in fixup_kfunc_call()
19196 if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && kptr_struct_meta) { in fixup_kfunc_call()
19199 return -EFAULT; in fixup_kfunc_call()
19207 } else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] || in fixup_kfunc_call()
19208 desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] || in fixup_kfunc_call()
19209 desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { in fixup_kfunc_call()
19210 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; in fixup_kfunc_call()
19213 if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] && kptr_struct_meta) { in fixup_kfunc_call()
19216 return -EFAULT; in fixup_kfunc_call()
19219 if (desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] && in fixup_kfunc_call()
19223 return -EFAULT; in fixup_kfunc_call()
19230 } else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] || in fixup_kfunc_call()
19231 desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || in fixup_kfunc_call()
19232 desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { in fixup_kfunc_call()
19233 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; in fixup_kfunc_call()
19237 /* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */ in fixup_kfunc_call()
19238 if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { in fixup_kfunc_call()
19246 return -EFAULT; in fixup_kfunc_call()
19249 __fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg, in fixup_kfunc_call()
19251 } else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] || in fixup_kfunc_call()
19252 desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { in fixup_kfunc_call()
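/*
 * Hedged sketch of the program-side pattern served by the bpf_obj_new_impl()
 * and bpf_obj_drop_impl() fixups above.  bpf_obj_new()/bpf_obj_drop() are the
 * wrappers from the selftests' bpf_experimental.h; the verifier materialises
 * the hidden size and btf_struct_meta arguments patched in here.  Struct and
 * function names are invented.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

struct node_data {
	long key;
	long value;
};

SEC("tc")
int alloc_and_free(struct __sk_buff *skb)
{
	struct node_data *n;

	n = bpf_obj_new(typeof(*n));	/* allocation size and meta patched in by the verifier */
	if (!n)
		return 0;
	bpf_obj_drop(n);		/* hidden meta argument likewise patched in */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";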
19259 /* The function requires that the first instruction in 'patch' is insnsi[prog->len - 1] */
19262 struct bpf_subprog_info *info = env->subprog_info; in add_hidden_subprog()
19263 int cnt = env->subprog_cnt; in add_hidden_subprog()
19267 if (env->hidden_subprog_cnt) { in add_hidden_subprog()
19269 return -EFAULT; in add_hidden_subprog()
19273 * in bpf_patch_insn_data are no-ops. in add_hidden_subprog()
19275 prog = bpf_patch_insn_data(env, env->prog->len - 1, patch, len); in add_hidden_subprog()
19277 return -ENOMEM; in add_hidden_subprog()
19278 env->prog = prog; in add_hidden_subprog()
19280 info[cnt].start = prog->len - len + 1; in add_hidden_subprog()
19281 env->subprog_cnt++; in add_hidden_subprog()
19282 env->hidden_subprog_cnt++; in add_hidden_subprog()
19286 /* Do various post-verification rewrites in a single program pass.
19291 struct bpf_prog *prog = env->prog; in do_misc_fixups()
19292 enum bpf_attach_type eatype = prog->expected_attach_type; in do_misc_fixups()
19294 struct bpf_insn *insn = prog->insnsi; in do_misc_fixups()
19296 const int insn_cnt = prog->len; in do_misc_fixups()
19304 if (env->seen_exception && !env->exception_callback_subprog) { in do_misc_fixups()
19306 env->prog->insnsi[insn_cnt - 1], in do_misc_fixups()
19314 prog = env->prog; in do_misc_fixups()
19315 insn = prog->insnsi; in do_misc_fixups()
19317 env->exception_callback_subprog = env->subprog_cnt - 1; in do_misc_fixups()
19319 mark_subprog_exc_cb(env, env->exception_callback_subprog); in do_misc_fixups()
19323 /* Make divide-by-zero exceptions impossible. */ in do_misc_fixups()
19324 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || in do_misc_fixups()
19325 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || in do_misc_fixups()
19326 insn->code == (BPF_ALU | BPF_MOD | BPF_X) || in do_misc_fixups()
19327 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { in do_misc_fixups()
19328 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; in do_misc_fixups()
19329 bool isdiv = BPF_OP(insn->code) == BPF_DIV; in do_misc_fixups()
19332 /* [R,W]x div 0 -> 0 */ in do_misc_fixups()
19334 BPF_JNE | BPF_K, insn->src_reg, in do_misc_fixups()
19336 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), in do_misc_fixups()
19341 /* [R,W]x mod 0 -> [R,W]x */ in do_misc_fixups()
19343 BPF_JEQ | BPF_K, insn->src_reg, in do_misc_fixups()
19347 BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), in do_misc_fixups()
19352 ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0); in do_misc_fixups()
19356 return -ENOMEM; in do_misc_fixups()
19358 delta += cnt - 1; in do_misc_fixups()
19359 env->prog = prog = new_prog; in do_misc_fixups()
19360 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
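/*
 * Hedged sketch of the check-and-divide patchlet built above for a 64-bit
 * "r1 /= r2" (BPF_ALU64 | BPF_DIV | BPF_X), using the macros from
 * include/linux/filter.h.  Register choices are illustrative; the real code
 * reuses insn->src_reg/insn->dst_reg and uses BPF_JMP32 for 32-bit ops.
 */
static const struct bpf_insn chk_and_div_example[] = {
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 2),		/* divisor != 0: jump to the divide */
	BPF_ALU32_REG(BPF_XOR, BPF_REG_1, BPF_REG_1),	/* divisor == 0: r1 = 0 ...         */
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),			/* ... and skip the divide          */
	BPF_ALU64_REG(BPF_DIV, BPF_REG_1, BPF_REG_2),	/* original instruction             */
};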
19365 if (BPF_CLASS(insn->code) == BPF_LD && in do_misc_fixups()
19366 (BPF_MODE(insn->code) == BPF_ABS || in do_misc_fixups()
19367 BPF_MODE(insn->code) == BPF_IND)) { in do_misc_fixups()
19368 cnt = env->ops->gen_ld_abs(insn, insn_buf); in do_misc_fixups()
19371 return -EINVAL; in do_misc_fixups()
19376 return -ENOMEM; in do_misc_fixups()
19378 delta += cnt - 1; in do_misc_fixups()
19379 env->prog = prog = new_prog; in do_misc_fixups()
19380 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
19385 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || in do_misc_fixups()
19386 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { in do_misc_fixups()
19393 aux = &env->insn_aux_data[i + delta]; in do_misc_fixups()
19394 if (!aux->alu_state || in do_misc_fixups()
19395 aux->alu_state == BPF_ALU_NON_POINTER) in do_misc_fixups()
19398 isneg = aux->alu_state & BPF_ALU_NEG_VALUE; in do_misc_fixups()
19399 issrc = (aux->alu_state & BPF_ALU_SANITIZE) == in do_misc_fixups()
19401 isimm = aux->alu_state & BPF_ALU_IMMEDIATE; in do_misc_fixups()
19403 off_reg = issrc ? insn->src_reg : insn->dst_reg; in do_misc_fixups()
19405 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); in do_misc_fixups()
19408 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); in do_misc_fixups()
19409 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); in do_misc_fixups()
19417 *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg); in do_misc_fixups()
19418 insn->src_reg = BPF_REG_AX; in do_misc_fixups()
19420 insn->code = insn->code == code_add ? in do_misc_fixups()
19424 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); in do_misc_fixups()
19425 cnt = patch - insn_buf; in do_misc_fixups()
19429 return -ENOMEM; in do_misc_fixups()
19431 delta += cnt - 1; in do_misc_fixups()
19432 env->prog = prog = new_prog; in do_misc_fixups()
19433 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
19437 if (insn->code != (BPF_JMP | BPF_CALL)) in do_misc_fixups()
19439 if (insn->src_reg == BPF_PSEUDO_CALL) in do_misc_fixups()
19441 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { in do_misc_fixups()
19450 return -ENOMEM; in do_misc_fixups()
19452 delta += cnt - 1; in do_misc_fixups()
19453 env->prog = prog = new_prog; in do_misc_fixups()
19454 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
19458 if (insn->imm == BPF_FUNC_get_route_realm) in do_misc_fixups()
19459 prog->dst_needed = 1; in do_misc_fixups()
19460 if (insn->imm == BPF_FUNC_get_prandom_u32) in do_misc_fixups()
19462 if (insn->imm == BPF_FUNC_override_return) in do_misc_fixups()
19463 prog->kprobe_override = 1; in do_misc_fixups()
19464 if (insn->imm == BPF_FUNC_tail_call) { in do_misc_fixups()
19470 prog->cb_access = 1; in do_misc_fixups()
19472 prog->aux->stack_depth = MAX_BPF_STACK; in do_misc_fixups()
19473 prog->aux->max_pkt_offset = MAX_PACKET_OFF; in do_misc_fixups()
19480 insn->imm = 0; in do_misc_fixups()
19481 insn->code = BPF_JMP | BPF_TAIL_CALL; in do_misc_fixups()
19483 aux = &env->insn_aux_data[i + delta]; in do_misc_fixups()
19484 if (env->bpf_capable && !prog->blinding_requested && in do_misc_fixups()
19485 prog->jit_requested && in do_misc_fixups()
19491 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state), in do_misc_fixups()
19502 insn->imm = ret + 1; in do_misc_fixups()
19512 * index &= array->index_mask; in do_misc_fixups()
19513 * to avoid out-of-bounds cpu speculation in do_misc_fixups()
19517 return -EINVAL; in do_misc_fixups()
19520 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); in do_misc_fixups()
19522 map_ptr->max_entries, 2); in do_misc_fixups()
19526 map)->index_mask); in do_misc_fixups()
19531 return -ENOMEM; in do_misc_fixups()
19533 delta += cnt - 1; in do_misc_fixups()
19534 env->prog = prog = new_prog; in do_misc_fixups()
19535 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
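/*
 * Hedged sketch of the bounds clamp inserted above before a direct tail
 * call, for an array with max_entries == 4 (index_mask == 3); the constants
 * are illustrative.  An out-of-range index in R3 skips both the mask and the
 * tail call itself, matching the helper's "do nothing" semantics.
 */
static const struct bpf_insn tail_call_clamp_example[] = {
	BPF_JMP_IMM(BPF_JGE, BPF_REG_3, 4, 2),	/* index >= max_entries: skip the two insns below */
	BPF_ALU32_IMM(BPF_AND, BPF_REG_3, 3),	/* index &= array->index_mask */
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),		/* stand-in for the original tail call insn */
};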
19539 if (insn->imm == BPF_FUNC_timer_set_callback) { in do_misc_fixups()
19548 * Those that were not bpf_timer_init-ed will return -EINVAL. in do_misc_fixups()
19550 * Those that were not both bpf_timer_init-ed and in do_misc_fixups()
19551 * bpf_timer_set_callback-ed will return -EINVAL. in do_misc_fixups()
19554 BPF_LD_IMM64(BPF_REG_3, (long)prog->aux), in do_misc_fixups()
19564 return -ENOMEM; in do_misc_fixups()
19566 delta += cnt - 1; in do_misc_fixups()
19567 env->prog = prog = new_prog; in do_misc_fixups()
19568 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
19572 if (is_storage_get_function(insn->imm)) { in do_misc_fixups()
19573 if (!env->prog->aux->sleepable || in do_misc_fixups()
19574 env->insn_aux_data[i + delta].storage_get_func_atomic) in do_misc_fixups()
19583 return -ENOMEM; in do_misc_fixups()
19585 delta += cnt - 1; in do_misc_fixups()
19586 env->prog = prog = new_prog; in do_misc_fixups()
19587 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
19592 if (env->insn_aux_data[i + delta].call_with_percpu_alloc_ptr) { in do_misc_fixups()
19602 return -ENOMEM; in do_misc_fixups()
19604 delta += cnt - 1; in do_misc_fixups()
19605 env->prog = prog = new_prog; in do_misc_fixups()
19606 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
19614 if (prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
19615 (insn->imm == BPF_FUNC_map_lookup_elem || in do_misc_fixups()
19616 insn->imm == BPF_FUNC_map_update_elem || in do_misc_fixups()
19617 insn->imm == BPF_FUNC_map_delete_elem || in do_misc_fixups()
19618 insn->imm == BPF_FUNC_map_push_elem || in do_misc_fixups()
19619 insn->imm == BPF_FUNC_map_pop_elem || in do_misc_fixups()
19620 insn->imm == BPF_FUNC_map_peek_elem || in do_misc_fixups()
19621 insn->imm == BPF_FUNC_redirect_map || in do_misc_fixups()
19622 insn->imm == BPF_FUNC_for_each_map_elem || in do_misc_fixups()
19623 insn->imm == BPF_FUNC_map_lookup_percpu_elem)) { in do_misc_fixups()
19624 aux = &env->insn_aux_data[i + delta]; in do_misc_fixups()
19628 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); in do_misc_fixups()
19629 ops = map_ptr->ops; in do_misc_fixups()
19630 if (insn->imm == BPF_FUNC_map_lookup_elem && in do_misc_fixups()
19631 ops->map_gen_lookup) { in do_misc_fixups()
19632 cnt = ops->map_gen_lookup(map_ptr, insn_buf); in do_misc_fixups()
19633 if (cnt == -EOPNOTSUPP) in do_misc_fixups()
19637 return -EINVAL; in do_misc_fixups()
19643 return -ENOMEM; in do_misc_fixups()
19645 delta += cnt - 1; in do_misc_fixups()
19646 env->prog = prog = new_prog; in do_misc_fixups()
19647 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
19651 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, in do_misc_fixups()
19653 BUILD_BUG_ON(!__same_type(ops->map_delete_elem, in do_misc_fixups()
19655 BUILD_BUG_ON(!__same_type(ops->map_update_elem, in do_misc_fixups()
19658 BUILD_BUG_ON(!__same_type(ops->map_push_elem, in do_misc_fixups()
19661 BUILD_BUG_ON(!__same_type(ops->map_pop_elem, in do_misc_fixups()
19663 BUILD_BUG_ON(!__same_type(ops->map_peek_elem, in do_misc_fixups()
19665 BUILD_BUG_ON(!__same_type(ops->map_redirect, in do_misc_fixups()
19667 BUILD_BUG_ON(!__same_type(ops->map_for_each_callback, in do_misc_fixups()
19672 BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem, in do_misc_fixups()
19676 switch (insn->imm) { in do_misc_fixups()
19678 insn->imm = BPF_CALL_IMM(ops->map_lookup_elem); in do_misc_fixups()
19681 insn->imm = BPF_CALL_IMM(ops->map_update_elem); in do_misc_fixups()
19684 insn->imm = BPF_CALL_IMM(ops->map_delete_elem); in do_misc_fixups()
19687 insn->imm = BPF_CALL_IMM(ops->map_push_elem); in do_misc_fixups()
19690 insn->imm = BPF_CALL_IMM(ops->map_pop_elem); in do_misc_fixups()
19693 insn->imm = BPF_CALL_IMM(ops->map_peek_elem); in do_misc_fixups()
19696 insn->imm = BPF_CALL_IMM(ops->map_redirect); in do_misc_fixups()
19699 insn->imm = BPF_CALL_IMM(ops->map_for_each_callback); in do_misc_fixups()
19702 insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem); in do_misc_fixups()
19710 if (prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
19711 insn->imm == BPF_FUNC_jiffies64) { in do_misc_fixups()
19726 return -ENOMEM; in do_misc_fixups()
19728 delta += cnt - 1; in do_misc_fixups()
19729 env->prog = prog = new_prog; in do_misc_fixups()
19730 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
19736 insn->imm == BPF_FUNC_get_func_arg) { in do_misc_fixups()
19737 /* Load nr_args from ctx - 8 */ in do_misc_fixups()
19738 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); in do_misc_fixups()
19746 insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL); in do_misc_fixups()
19751 return -ENOMEM; in do_misc_fixups()
19753 delta += cnt - 1; in do_misc_fixups()
19754 env->prog = prog = new_prog; in do_misc_fixups()
19755 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
19761 insn->imm == BPF_FUNC_get_func_ret) { in do_misc_fixups()
19764 /* Load nr_args from ctx - 8 */ in do_misc_fixups()
19765 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); in do_misc_fixups()
19773 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP); in do_misc_fixups()
19779 return -ENOMEM; in do_misc_fixups()
19781 delta += cnt - 1; in do_misc_fixups()
19782 env->prog = prog = new_prog; in do_misc_fixups()
19783 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
19789 insn->imm == BPF_FUNC_get_func_arg_cnt) { in do_misc_fixups()
19790 /* Load nr_args from ctx - 8 */ in do_misc_fixups()
19791 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); in do_misc_fixups()
19795 return -ENOMEM; in do_misc_fixups()
19797 env->prog = prog = new_prog; in do_misc_fixups()
19798 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
19804 insn->imm == BPF_FUNC_get_func_ip) { in do_misc_fixups()
19805 /* Load IP address from ctx - 16 */ in do_misc_fixups()
19806 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16); in do_misc_fixups()
19810 return -ENOMEM; in do_misc_fixups()
19812 env->prog = prog = new_prog; in do_misc_fixups()
19813 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
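/*
 * Hedged usage example for the bpf_get_func_ip() rewrite above: in a
 * trampoline-attached tracing program the helper call collapses into a
 * single load from ctx - 16.  Attach point and names are illustrative.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

unsigned long last_ip;

SEC("fentry/do_unlinkat")
int BPF_PROG(at_entry, int dfd, struct filename *name)
{
	last_ip = bpf_get_func_ip(ctx);	/* patched to *(u64 *)(ctx - 16) */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";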
19818 fn = env->ops->get_func_proto(insn->imm, env->prog); in do_misc_fixups()
19820 * programs to call them, must be real in-kernel functions in do_misc_fixups()
19822 if (!fn->func) { in do_misc_fixups()
19825 func_id_name(insn->imm), insn->imm); in do_misc_fixups()
19826 return -EFAULT; in do_misc_fixups()
19828 insn->imm = fn->func - __bpf_call_base; in do_misc_fixups()
19832 for (i = 0; i < prog->aux->size_poke_tab; i++) { in do_misc_fixups()
19833 map_ptr = prog->aux->poke_tab[i].tail_call.map; in do_misc_fixups()
19834 if (!map_ptr->ops->map_poke_track || in do_misc_fixups()
19835 !map_ptr->ops->map_poke_untrack || in do_misc_fixups()
19836 !map_ptr->ops->map_poke_run) { in do_misc_fixups()
19838 return -EINVAL; in do_misc_fixups()
19841 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); in do_misc_fixups()
19848 sort_kfunc_descs_by_imm_off(env->prog); in do_misc_fixups()
19879 BPF_MOV32_IMM(BPF_REG_0, -E2BIG), in inline_bpf_loop()
19902 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6), in inline_bpf_loop()
19919 callback_start = env->subprog_info[callback_subprogno].start; in inline_bpf_loop()
19922 callback_offset = callback_start - call_insn_offset - 1; in inline_bpf_loop()
19923 new_prog->insnsi[call_insn_offset].imm = callback_offset; in inline_bpf_loop()
19930 return insn->code == (BPF_JMP | BPF_CALL) && in is_bpf_loop_call()
19931 insn->src_reg == 0 && in is_bpf_loop_call()
19932 insn->imm == BPF_FUNC_loop; in is_bpf_loop_call()
19935 /* For all sub-programs in the program (including main) check
19946 struct bpf_subprog_info *subprogs = env->subprog_info; in optimize_bpf_loop()
19948 struct bpf_insn *insn = env->prog->insnsi; in optimize_bpf_loop()
19949 int insn_cnt = env->prog->len; in optimize_bpf_loop()
19951 u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; in optimize_bpf_loop()
19956 &env->insn_aux_data[i + delta].loop_inline_state; in optimize_bpf_loop()
19958 if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) { in optimize_bpf_loop()
19964 -(stack_depth + stack_depth_extra), in optimize_bpf_loop()
19965 inline_state->callback_subprogno, in optimize_bpf_loop()
19968 return -ENOMEM; in optimize_bpf_loop()
19970 delta += cnt - 1; in optimize_bpf_loop()
19971 env->prog = new_prog; in optimize_bpf_loop()
19972 insn = new_prog->insnsi + i + delta; in optimize_bpf_loop()
19979 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; in optimize_bpf_loop()
19984 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; in optimize_bpf_loop()
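/*
 * Hedged example of a bpf_loop() call shaped like what the inlining pass
 * above targets: flags == 0 and a small static callback.  Callback and
 * program names are invented; whether a given call is actually inlined
 * depends on the fit_for_inline checks recorded during verification.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

long total;

static long sum_cb(__u32 index, void *ctx)
{
	*(long *)ctx += index;
	return 0;			/* 0 = keep looping, 1 = break */
}

SEC("tc")
int sum_prog(struct __sk_buff *skb)
{
	long acc = 0;

	bpf_loop(8, sum_cb, &acc, 0);
	total = acc;
	return 0;
}

char LICENSE[] SEC("license") = "GPL";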
19994 sl = env->free_list; in free_states()
19996 sln = sl->next; in free_states()
19997 free_verifier_state(&sl->state, false); in free_states()
20001 env->free_list = NULL; in free_states()
20003 if (!env->explored_states) in free_states()
20007 sl = env->explored_states[i]; in free_states()
20010 sln = sl->next; in free_states()
20011 free_verifier_state(&sl->state, false); in free_states()
20015 env->explored_states[i] = NULL; in free_states()
20021 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); in do_check_common()
20027 env->prev_linfo = NULL; in do_check_common()
20028 env->pass_cnt++; in do_check_common()
20032 return -ENOMEM; in do_check_common()
20033 state->curframe = 0; in do_check_common()
20034 state->speculative = false; in do_check_common()
20035 state->branches = 1; in do_check_common()
20036 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); in do_check_common()
20037 if (!state->frame[0]) { in do_check_common()
20039 return -ENOMEM; in do_check_common()
20041 env->cur_state = state; in do_check_common()
20042 init_func_state(env, state->frame[0], in do_check_common()
20046 state->first_insn_idx = env->subprog_info[subprog].start; in do_check_common()
20047 state->last_insn_idx = -1; in do_check_common()
20050 regs = state->frame[state->curframe]->regs; in do_check_common()
20051 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { in do_check_common()
20062 state->frame[0]->in_exception_callback_fn = true; in do_check_common()
20067 if (sub->arg_cnt != 1 || sub->args[0].arg_type != ARG_ANYTHING) { in do_check_common()
20069 ret = -EINVAL; in do_check_common()
20073 for (i = BPF_REG_1; i <= sub->arg_cnt; i++) { in do_check_common()
20074 arg = &sub->args[i - BPF_REG_1]; in do_check_common()
20077 if (arg->arg_type == ARG_PTR_TO_CTX) { in do_check_common()
20078 reg->type = PTR_TO_CTX; in do_check_common()
20080 } else if (arg->arg_type == ARG_ANYTHING) { in do_check_common()
20081 reg->type = SCALAR_VALUE; in do_check_common()
20083 } else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) { in do_check_common()
20085 __mark_dynptr_reg(reg, BPF_DYNPTR_TYPE_LOCAL, true, ++env->id_gen); in do_check_common()
20086 } else if (base_type(arg->arg_type) == ARG_PTR_TO_MEM) { in do_check_common()
20087 reg->type = PTR_TO_MEM; in do_check_common()
20088 if (arg->arg_type & PTR_MAYBE_NULL) in do_check_common()
20089 reg->type |= PTR_MAYBE_NULL; in do_check_common()
20091 reg->mem_size = arg->mem_size; in do_check_common()
20092 reg->id = ++env->id_gen; in do_check_common()
20095 i - BPF_REG_1, arg->arg_type); in do_check_common()
20096 ret = -EFAULT; in do_check_common()
20105 if (env->prog->aux->func_info_aux) { in do_check_common()
20107 if (ret || sub->arg_cnt != 1 || sub->args[0].arg_type != ARG_PTR_TO_CTX) in do_check_common()
20108 env->prog->aux->func_info_aux[0].unreliable = true; in do_check_common()
20121 if (env->cur_state) { in do_check_common()
20122 free_verifier_state(env->cur_state, true); in do_check_common()
20123 env->cur_state = NULL; in do_check_common()
20127 bpf_vlog_reset(&env->log, 0); in do_check_common()
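/*
 * Hedged example of a global sub-program whose arguments the typing loop in
 * do_check_common() above handles: BTF makes the first argument
 * ARG_PTR_TO_CTX and the second ARG_ANYTHING, so the sub-program can be
 * verified once, independently of its callers.  Names are invented.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

__noinline int add_to_len(struct __sk_buff *skb, int delta)
{
	return skb->len + delta;
}

SEC("tc")
int entry(struct __sk_buff *skb)
{
	return add_to_len(skb, 4) & 1;
}

char LICENSE[] SEC("license") = "GPL";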
20154 struct bpf_prog_aux *aux = env->prog->aux; in do_check_subprogs()
20158 if (!aux->func_info) in do_check_subprogs()
20162 if (env->exception_callback_subprog) in do_check_subprogs()
20163 subprog_aux(env, env->exception_callback_subprog)->called = true; in do_check_subprogs()
20167 for (i = 1; i < env->subprog_cnt; i++) { in do_check_subprogs()
20172 if (!sub_aux->called || sub_aux->verified) in do_check_subprogs()
20175 env->insn_idx = env->subprog_info[i].start; in do_check_subprogs()
20176 WARN_ON_ONCE(env->insn_idx == 0); in do_check_subprogs()
20180 } else if (env->log.level & BPF_LOG_LEVEL) { in do_check_subprogs()
20189 sub_aux->verified = true; in do_check_subprogs()
20206 env->insn_idx = 0; in do_check_main()
20209 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; in do_check_main()
20218 if (env->log.level & BPF_LOG_STATS) { in print_verification_stats()
20220 div_u64(env->verification_time, 1000)); in print_verification_stats()
20222 for (i = 0; i < env->subprog_cnt; i++) { in print_verification_stats()
20223 u32 depth = env->subprog_info[i].stack_depth; in print_verification_stats()
20226 if (i + 1 < env->subprog_cnt) in print_verification_stats()
20233 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, in print_verification_stats()
20234 env->max_states_per_insn, env->total_states, in print_verification_stats()
20235 env->peak_states, env->longest_mark_read_walk); in print_verification_stats()
20243 struct bpf_prog *prog = env->prog; in check_struct_ops_btf_id()
20247 if (!prog->gpl_compatible) { in check_struct_ops_btf_id()
20249 return -EINVAL; in check_struct_ops_btf_id()
20252 btf_id = prog->aux->attach_btf_id; in check_struct_ops_btf_id()
20257 return -ENOTSUPP; in check_struct_ops_btf_id()
20260 t = st_ops->type; in check_struct_ops_btf_id()
20261 member_idx = prog->expected_attach_type; in check_struct_ops_btf_id()
20264 member_idx, st_ops->name); in check_struct_ops_btf_id()
20265 return -EINVAL; in check_struct_ops_btf_id()
20269 mname = btf_name_by_offset(btf_vmlinux, member->name_off); in check_struct_ops_btf_id()
20270 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type, in check_struct_ops_btf_id()
20274 mname, member_idx, st_ops->name); in check_struct_ops_btf_id()
20275 return -EINVAL; in check_struct_ops_btf_id()
20278 if (st_ops->check_member) { in check_struct_ops_btf_id()
20279 int err = st_ops->check_member(t, member, prog); in check_struct_ops_btf_id()
20283 mname, st_ops->name); in check_struct_ops_btf_id()
20288 prog->aux->attach_func_proto = func_proto; in check_struct_ops_btf_id()
20289 prog->aux->attach_func_name = mname; in check_struct_ops_btf_id()
20290 env->ops = st_ops->verifier_ops; in check_struct_ops_btf_id()
20299 !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1)) in check_attach_modify_return()
20302 return -EINVAL; in check_attach_modify_return()
20305 /* list of non-sleepable functions that are otherwise on
20309 /* Three functions below can be called from sleepable and non-sleepable context.
20310  * Assume non-sleepable from the BPF safety point of view.
20328 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; in bpf_check_attach_target()
20329 bool prog_tracing = prog->type == BPF_PROG_TYPE_TRACING; in bpf_check_attach_target()
20331 int ret = 0, subprog = -1, i; in bpf_check_attach_target()
20341 return -EINVAL; in bpf_check_attach_target()
20343 btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf; in bpf_check_attach_target()
20347 return -EINVAL; in bpf_check_attach_target()
20352 return -EINVAL; in bpf_check_attach_target()
20354 tname = btf_name_by_offset(btf, t->name_off); in bpf_check_attach_target()
20357 return -EINVAL; in bpf_check_attach_target()
20360 struct bpf_prog_aux *aux = tgt_prog->aux; in bpf_check_attach_target()
20362 if (bpf_prog_is_dev_bound(prog->aux) && in bpf_check_attach_target()
20365 return -EINVAL; in bpf_check_attach_target()
20368 for (i = 0; i < aux->func_info_cnt; i++) in bpf_check_attach_target()
20369 if (aux->func_info[i].type_id == btf_id) { in bpf_check_attach_target()
20373 if (subprog == -1) { in bpf_check_attach_target()
20375 return -EINVAL; in bpf_check_attach_target()
20377 if (aux->func && aux->func[subprog]->aux->exception_cb) { in bpf_check_attach_target()
20381 return -EINVAL; in bpf_check_attach_target()
20383 conservative = aux->func_info_aux[subprog].unreliable; in bpf_check_attach_target()
20388 return -EINVAL; in bpf_check_attach_target()
20390 if (!prog->jit_requested) { in bpf_check_attach_target()
20393 return -EINVAL; in bpf_check_attach_target()
20396 if (!tgt_prog->jited) { in bpf_check_attach_target()
20398 return -EINVAL; in bpf_check_attach_target()
20401 if (aux->attach_tracing_prog) { in bpf_check_attach_target()
20408 return -EINVAL; in bpf_check_attach_target()
20410 } else if (tgt_prog->type == prog->type) { in bpf_check_attach_target()
20417 return -EINVAL; in bpf_check_attach_target()
20419 if (tgt_prog->type == BPF_PROG_TYPE_TRACING && in bpf_check_attach_target()
20421 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY || in bpf_check_attach_target()
20422 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) { in bpf_check_attach_target()
20433 * long call chain fentry->extension->fentry->extension in bpf_check_attach_target()
20438 return -EINVAL; in bpf_check_attach_target()
20443 return -EINVAL; in bpf_check_attach_target()
20447 switch (prog->expected_attach_type) { in bpf_check_attach_target()
20452 return -EINVAL; in bpf_check_attach_target()
20457 return -EINVAL; in bpf_check_attach_target()
20459 if (strncmp(prefix, tname, sizeof(prefix) - 1)) { in bpf_check_attach_target()
20462 return -EINVAL; in bpf_check_attach_target()
20464 tname += sizeof(prefix) - 1; in bpf_check_attach_target()
20465 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
20468 return -EINVAL; in bpf_check_attach_target()
20469 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
20472 return -EINVAL; in bpf_check_attach_target()
20479 return -EINVAL; in bpf_check_attach_target()
20481 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
20483 return -EINVAL; in bpf_check_attach_target()
20484 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); in bpf_check_attach_target()
20490 return -EINVAL; in bpf_check_attach_target()
20500 return -EINVAL; in bpf_check_attach_target()
20504 return -EINVAL; in bpf_check_attach_target()
20505 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
20507 return -EINVAL; in bpf_check_attach_target()
20509 if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) && in bpf_check_attach_target()
20510 (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type || in bpf_check_attach_target()
20511 prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type)) in bpf_check_attach_target()
20512 return -EINVAL; in bpf_check_attach_target()
20517 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); in bpf_check_attach_target()
20523 addr = (long) tgt_prog->bpf_func; in bpf_check_attach_target()
20525 addr = (long) tgt_prog->aux->func[subprog]->bpf_func; in bpf_check_attach_target()
20541 return -ENOENT; in bpf_check_attach_target()
20545 if (prog->aux->sleepable) { in bpf_check_attach_target()
20546 ret = -EINVAL; in bpf_check_attach_target()
20547 switch (prog->type) { in bpf_check_attach_target()
20582 } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) { in bpf_check_attach_target()
20586 return -EINVAL; in bpf_check_attach_target()
20588 ret = -EINVAL; in bpf_check_attach_target()
20601 tgt_info->tgt_addr = addr; in bpf_check_attach_target()
20602 tgt_info->tgt_name = tname; in bpf_check_attach_target()
20603 tgt_info->tgt_type = t; in bpf_check_attach_target()
20604 tgt_info->tgt_mod = mod; in bpf_check_attach_target()
20629 if (prog->type == BPF_PROG_TYPE_TRACING) { in BTF_SET_START()
20630 switch (prog->expected_attach_type) { in BTF_SET_START()
20640 return prog->type == BPF_PROG_TYPE_LSM || in BTF_SET_START()
20641 prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ || in BTF_SET_START()
20642 prog->type == BPF_PROG_TYPE_STRUCT_OPS; in BTF_SET_START()
20647 struct bpf_prog *prog = env->prog; in check_attach_btf_id()
20648 struct bpf_prog *tgt_prog = prog->aux->dst_prog; in check_attach_btf_id()
20650 u32 btf_id = prog->aux->attach_btf_id; in check_attach_btf_id()
20655 if (prog->type == BPF_PROG_TYPE_SYSCALL) { in check_attach_btf_id()
20656 if (prog->aux->sleepable) in check_attach_btf_id()
20660 return -EINVAL; in check_attach_btf_id()
20663 if (prog->aux->sleepable && !can_be_sleepable(prog)) { in check_attach_btf_id()
20665 return -EINVAL; in check_attach_btf_id()
20668 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) in check_attach_btf_id()
20671 if (prog->type != BPF_PROG_TYPE_TRACING && in check_attach_btf_id()
20672 prog->type != BPF_PROG_TYPE_LSM && in check_attach_btf_id()
20673 prog->type != BPF_PROG_TYPE_EXT) in check_attach_btf_id()
20676 ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info); in check_attach_btf_id()
20680 if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) { in check_attach_btf_id()
20682 * inherit env->ops and expected_attach_type for the rest of the in check_attach_btf_id()
20685 env->ops = bpf_verifier_ops[tgt_prog->type]; in check_attach_btf_id()
20686 prog->expected_attach_type = tgt_prog->expected_attach_type; in check_attach_btf_id()
20690 prog->aux->attach_func_proto = tgt_info.tgt_type; in check_attach_btf_id()
20691 prog->aux->attach_func_name = tgt_info.tgt_name; in check_attach_btf_id()
20692 prog->aux->mod = tgt_info.tgt_mod; in check_attach_btf_id()
20695 prog->aux->saved_dst_prog_type = tgt_prog->type; in check_attach_btf_id()
20696 prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type; in check_attach_btf_id()
20699 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) { in check_attach_btf_id()
20700 prog->aux->attach_btf_trace = true; in check_attach_btf_id()
20702 } else if (prog->expected_attach_type == BPF_TRACE_ITER) { in check_attach_btf_id()
20704 return -EINVAL; in check_attach_btf_id()
20708 if (prog->type == BPF_PROG_TYPE_LSM) { in check_attach_btf_id()
20709 ret = bpf_lsm_verify_prog(&env->log, prog); in check_attach_btf_id()
20712 } else if (prog->type == BPF_PROG_TYPE_TRACING && in check_attach_btf_id()
20714 return -EINVAL; in check_attach_btf_id()
20717 key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id); in check_attach_btf_id()
20720 return -ENOMEM; in check_attach_btf_id()
20722 if (tgt_prog && tgt_prog->aux->tail_call_reachable) in check_attach_btf_id()
20723 tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX; in check_attach_btf_id()
20725 prog->aux->dst_trampoline = tr; in check_attach_btf_id()
20744 int i, len, ret = -EINVAL, err; in bpf_check()
20750 return -EINVAL; in bpf_check()
20757 return -ENOMEM; in bpf_check()
20759 env->bt.env = env; in bpf_check()
20761 len = (*prog)->len; in bpf_check()
20762 env->insn_aux_data = in bpf_check()
20764 ret = -ENOMEM; in bpf_check()
20765 if (!env->insn_aux_data) in bpf_check()
20768 env->insn_aux_data[i].orig_idx = i; in bpf_check()
20769 env->prog = *prog; in bpf_check()
20770 env->ops = bpf_verifier_ops[env->prog->type]; in bpf_check()
20771 env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel); in bpf_check()
20783 ret = bpf_vlog_init(&env->log, attr->log_level, in bpf_check()
20784 (char __user *) (unsigned long) attr->log_buf, in bpf_check()
20785 attr->log_size); in bpf_check()
20793 verbose(env, "in-kernel BTF is malformed\n"); in bpf_check()
20798 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); in bpf_check()
20800 env->strict_alignment = true; in bpf_check()
20801 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) in bpf_check()
20802 env->strict_alignment = false; in bpf_check()
20804 env->allow_ptr_leaks = bpf_allow_ptr_leaks(); in bpf_check()
20805 env->allow_uninit_stack = bpf_allow_uninit_stack(); in bpf_check()
20806 env->bypass_spec_v1 = bpf_bypass_spec_v1(); in bpf_check()
20807 env->bypass_spec_v4 = bpf_bypass_spec_v4(); in bpf_check()
20808 env->bpf_capable = bpf_capable(); in bpf_check()
20811 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; in bpf_check()
20812 env->test_reg_invariants = attr->prog_flags & BPF_F_TEST_REG_INVARIANTS; in bpf_check()
20814 env->explored_states = kvcalloc(state_htab_size(env), in bpf_check()
20817 ret = -ENOMEM; in bpf_check()
20818 if (!env->explored_states) in bpf_check()
20845 if (bpf_prog_is_offloaded(env->prog->aux)) { in bpf_check()
20846 ret = bpf_prog_offload_verifier_prep(env->prog); in bpf_check()
20858 if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux)) in bpf_check()
20862 kvfree(env->explored_states); in bpf_check()
20890 /* do 32-bit optimization after insn patching has done so those patched in bpf_check()
20893 if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) { in bpf_check()
20895 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret in bpf_check()
20902 env->verification_time = ktime_get_ns() - start_time; in bpf_check()
20904 env->prog->aux->verified_insns = env->insn_processed; in bpf_check()
20907 err = bpf_vlog_finalize(&env->log, &log_true_size); in bpf_check()
20914 ret = -EFAULT; in bpf_check()
20921 if (env->used_map_cnt) { in bpf_check()
20923 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, in bpf_check()
20924 sizeof(env->used_maps[0]), in bpf_check()
20927 if (!env->prog->aux->used_maps) { in bpf_check()
20928 ret = -ENOMEM; in bpf_check()
20932 memcpy(env->prog->aux->used_maps, env->used_maps, in bpf_check()
20933 sizeof(env->used_maps[0]) * env->used_map_cnt); in bpf_check()
20934 env->prog->aux->used_map_cnt = env->used_map_cnt; in bpf_check()
20936 if (env->used_btf_cnt) { in bpf_check()
20938 env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt, in bpf_check()
20939 sizeof(env->used_btfs[0]), in bpf_check()
20941 if (!env->prog->aux->used_btfs) { in bpf_check()
20942 ret = -ENOMEM; in bpf_check()
20946 memcpy(env->prog->aux->used_btfs, env->used_btfs, in bpf_check()
20947 sizeof(env->used_btfs[0]) * env->used_btf_cnt); in bpf_check()
20948 env->prog->aux->used_btf_cnt = env->used_btf_cnt; in bpf_check()
20950 if (env->used_map_cnt || env->used_btf_cnt) { in bpf_check()
20960 if (!env->prog->aux->used_maps) in bpf_check()
20965 if (!env->prog->aux->used_btfs) in bpf_check()
20971 if (env->prog->type == BPF_PROG_TYPE_EXT) in bpf_check()
20972 env->prog->expected_attach_type = 0; in bpf_check()
20974 *prog = env->prog; in bpf_check()
20978 vfree(env->insn_aux_data); in bpf_check()