Lines Matching +full:reg +full:- +full:names
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
22 #include <linux/error-injection.h>
43 * The first pass is depth-first-search to check that the program is a DAG.
45 * - larger than BPF_MAXINSNS insns
46 * - if loop is present (detected via back-edge)
47 * - unreachable insns exist (shouldn't be a forest. program = one function)
48 * - out of bounds or malformed jumps
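A minimal sketch of a program the first pass would reject for containing a back-edge (illustrative only, built from the BPF_MOV64_IMM/BPF_JMP_A/BPF_EXIT_INSN macros in include/linux/filter.h; not an excerpt from this file):

	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_A(-2),		/* jumps back to the previous insn: a back-edge, i.e. a loop */
	BPF_EXIT_INSN(),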
60 * All registers are 64-bit.
61 * R0 - return register
62 * R1-R5 argument passing registers
63 * R6-R9 callee saved registers
64 * R10 - frame pointer read-only
71 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
76 * (and -20 constant is saved for further stack bounds checking).
77 * Meaning that this reg is a pointer to stack plus known immediate constant.
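Schematically, the pattern described above is (a sketch using the standard insn macros, not the exact excerpt from this comment):

	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),	/* R1 = frame pointer, so R1 becomes PTR_TO_STACK */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),	/* R1 is now PTR_TO_STACK with known offset -20   */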
116 * [key, key + map->key_size) bytes are valid and were initialized on
122 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
126 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
127 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
129 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
134 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
137 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
142 * After the call R0 is set to return type of the function and registers R1-R5
148 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
154 * passes through a NULL-check conditional. For the branch wherein the state is
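Putting the pieces above together, a minimal sketch of the lookup-and-NULL-check pattern the verifier accepts (using the insn helper macros from the kernel selftests; map_fd and an 8-byte value size are assumptions made for illustration):

	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),	/* initialize the 4-byte key at fp-4                     */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),	/* R2 is PTR_TO_STACK pointing at the key                */
	BPF_LD_MAP_FD(BPF_REG_1, map_fd),	/* R1 is CONST_PTR_TO_MAP                                */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
						/* R0 is now PTR_TO_MAP_VALUE_OR_NULL                    */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),	/* NULL check; fall-through branch sees PTR_TO_MAP_VALUE */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),	/* safe 8-byte store into the map value                  */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),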
191 return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON; in bpf_map_ptr_poisoned()
196 return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV; in bpf_map_ptr_unpriv()
204 aux->map_ptr_state = (unsigned long)map | in bpf_map_ptr_store()
210 return aux->map_key_state & BPF_MAP_KEY_POISON; in bpf_map_key_poisoned()
215 return !(aux->map_key_state & BPF_MAP_KEY_SEEN); in bpf_map_key_unseen()
220 return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON); in bpf_map_key_immediate()
227 aux->map_key_state = state | BPF_MAP_KEY_SEEN | in bpf_map_key_store()
256 prog = env->prog; in find_linfo()
257 nr_linfo = prog->aux->nr_linfo; in find_linfo()
259 if (!nr_linfo || insn_off >= prog->len) in find_linfo()
262 linfo = prog->aux->linfo; in find_linfo()
267 return &linfo[i - 1]; in find_linfo()
275 n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args); in bpf_verifier_vlog()
277 WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1, in bpf_verifier_vlog()
278 "verifier log line truncated - local buffer too short\n"); in bpf_verifier_vlog()
280 n = min(log->len_total - log->len_used - 1, n); in bpf_verifier_vlog()
281 log->kbuf[n] = '\0'; in bpf_verifier_vlog()
283 if (log->level == BPF_LOG_KERNEL) { in bpf_verifier_vlog()
284 pr_err("BPF:%s\n", log->kbuf); in bpf_verifier_vlog()
287 if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1)) in bpf_verifier_vlog()
288 log->len_used += n; in bpf_verifier_vlog()
290 log->ubuf = NULL; in bpf_verifier_vlog()
300 log->len_used = new_pos; in bpf_vlog_reset()
301 if (put_user(zero, log->ubuf + new_pos)) in bpf_vlog_reset()
302 log->ubuf = NULL; in bpf_vlog_reset()
314 if (!bpf_verifier_log_needed(&env->log)) in bpf_verifier_log_write()
318 bpf_verifier_vlog(&env->log, fmt, args); in bpf_verifier_log_write()
328 if (!bpf_verifier_log_needed(&env->log)) in verbose()
332 bpf_verifier_vlog(&env->log, fmt, args); in verbose()
363 if (!bpf_verifier_log_needed(&env->log)) in verbose_linfo()
367 if (!linfo || linfo == env->prev_linfo) in verbose_linfo()
374 bpf_verifier_vlog(&env->log, prefix_fmt, args); in verbose_linfo()
379 ltrim(btf_name_by_offset(env->prog->aux->btf, in verbose_linfo()
380 linfo->line_off))); in verbose_linfo()
382 env->prev_linfo = linfo; in verbose_linfo()
419 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) in reg_may_point_to_spin_lock() argument
421 return reg->type == PTR_TO_MAP_VALUE && in reg_may_point_to_spin_lock()
422 map_value_has_spin_lock(reg->map_ptr); in reg_may_point_to_spin_lock()
472 enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC; in is_acquire_function()
552 const struct bpf_reg_state *reg) in func() argument
554 struct bpf_verifier_state *cur = env->cur_state; in func()
556 return cur->frame[reg->frameno]; in func()
562 btf_type_by_id(btf_vmlinux, id)->name_off); in kernel_type_name()
568 const struct bpf_reg_state *reg; in print_verifier_state() local
572 if (state->frameno) in print_verifier_state()
573 verbose(env, " frame%d:", state->frameno); in print_verifier_state()
575 reg = &state->regs[i]; in print_verifier_state()
576 t = reg->type; in print_verifier_state()
580 print_liveness(env, reg->live); in print_verifier_state()
582 if (t == SCALAR_VALUE && reg->precise) in print_verifier_state()
585 tnum_is_const(reg->var_off)) { in print_verifier_state()
586 /* reg->off should be 0 for SCALAR_VALUE */ in print_verifier_state()
587 verbose(env, "%lld", reg->var_off.value + reg->off); in print_verifier_state()
592 verbose(env, "%s", kernel_type_name(reg->btf_id)); in print_verifier_state()
593 verbose(env, "(id=%d", reg->id); in print_verifier_state()
595 verbose(env, ",ref_obj_id=%d", reg->ref_obj_id); in print_verifier_state()
597 verbose(env, ",off=%d", reg->off); in print_verifier_state()
599 verbose(env, ",r=%d", reg->range); in print_verifier_state()
604 reg->map_ptr->key_size, in print_verifier_state()
605 reg->map_ptr->value_size); in print_verifier_state()
606 if (tnum_is_const(reg->var_off)) { in print_verifier_state()
609 * for reg->off in print_verifier_state()
611 verbose(env, ",imm=%llx", reg->var_off.value); in print_verifier_state()
613 if (reg->smin_value != reg->umin_value && in print_verifier_state()
614 reg->smin_value != S64_MIN) in print_verifier_state()
616 (long long)reg->smin_value); in print_verifier_state()
617 if (reg->smax_value != reg->umax_value && in print_verifier_state()
618 reg->smax_value != S64_MAX) in print_verifier_state()
620 (long long)reg->smax_value); in print_verifier_state()
621 if (reg->umin_value != 0) in print_verifier_state()
623 (unsigned long long)reg->umin_value); in print_verifier_state()
624 if (reg->umax_value != U64_MAX) in print_verifier_state()
626 (unsigned long long)reg->umax_value); in print_verifier_state()
627 if (!tnum_is_unknown(reg->var_off)) { in print_verifier_state()
630 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in print_verifier_state()
633 if (reg->s32_min_value != reg->smin_value && in print_verifier_state()
634 reg->s32_min_value != S32_MIN) in print_verifier_state()
636 (int)(reg->s32_min_value)); in print_verifier_state()
637 if (reg->s32_max_value != reg->smax_value && in print_verifier_state()
638 reg->s32_max_value != S32_MAX) in print_verifier_state()
640 (int)(reg->s32_max_value)); in print_verifier_state()
641 if (reg->u32_min_value != reg->umin_value && in print_verifier_state()
642 reg->u32_min_value != U32_MIN) in print_verifier_state()
644 (int)(reg->u32_min_value)); in print_verifier_state()
645 if (reg->u32_max_value != reg->umax_value && in print_verifier_state()
646 reg->u32_max_value != U32_MAX) in print_verifier_state()
648 (int)(reg->u32_max_value)); in print_verifier_state()
653 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in print_verifier_state()
659 if (state->stack[i].slot_type[j] != STACK_INVALID) in print_verifier_state()
662 state->stack[i].slot_type[j]]; in print_verifier_state()
667 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); in print_verifier_state()
668 print_liveness(env, state->stack[i].spilled_ptr.live); in print_verifier_state()
669 if (state->stack[i].slot_type[0] == STACK_SPILL) { in print_verifier_state()
670 reg = &state->stack[i].spilled_ptr; in print_verifier_state()
671 t = reg->type; in print_verifier_state()
673 if (t == SCALAR_VALUE && reg->precise) in print_verifier_state()
675 if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) in print_verifier_state()
676 verbose(env, "%lld", reg->var_off.value + reg->off); in print_verifier_state()
681 if (state->acquired_refs && state->refs[0].id) { in print_verifier_state()
682 verbose(env, " refs=%d", state->refs[0].id); in print_verifier_state()
683 for (i = 1; i < state->acquired_refs; i++) in print_verifier_state()
684 if (state->refs[i].id) in print_verifier_state()
685 verbose(env, ",%d", state->refs[i].id); in print_verifier_state()
694 if (!src->FIELD) \
696 if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \
699 return -EFAULT; \
701 memcpy(dst->FIELD, src->FIELD, \
702 sizeof(*src->FIELD) * (src->COUNT / SIZE)); \
715 u32 old_size = state->COUNT; \ in COPY_STATE_FN()
722 state->COUNT = slot * SIZE; \ in COPY_STATE_FN()
724 kfree(state->FIELD); \ in COPY_STATE_FN()
725 state->FIELD = NULL; \ in COPY_STATE_FN()
732 return -ENOMEM; \ in COPY_STATE_FN()
734 if (state->FIELD) \ in COPY_STATE_FN()
735 memcpy(new_##FIELD, state->FIELD, \ in COPY_STATE_FN()
738 sizeof(*new_##FIELD) * (size - old_size) / SIZE); \ in COPY_STATE_FN()
740 state->COUNT = slot * SIZE; \ in COPY_STATE_FN()
741 kfree(state->FIELD); \ in COPY_STATE_FN()
742 state->FIELD = new_##FIELD; \ in COPY_STATE_FN()
751 /* do_check() starts with zero-sized stack in struct bpf_verifier_state to
754 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
767 /* Acquire a pointer id from the env and update the state->refs to include
775 int new_ofs = state->acquired_refs; in acquire_reference_state()
778 err = realloc_reference_state(state, state->acquired_refs + 1, true); in acquire_reference_state()
781 id = ++env->id_gen; in acquire_reference_state()
782 state->refs[new_ofs].id = id; in acquire_reference_state()
783 state->refs[new_ofs].insn_idx = insn_idx; in acquire_reference_state()
793 last_idx = state->acquired_refs - 1; in release_reference_state()
794 for (i = 0; i < state->acquired_refs; i++) { in release_reference_state()
795 if (state->refs[i].id == ptr_id) { in release_reference_state()
797 memcpy(&state->refs[i], &state->refs[last_idx], in release_reference_state()
798 sizeof(*state->refs)); in release_reference_state()
799 memset(&state->refs[last_idx], 0, sizeof(*state->refs)); in release_reference_state()
800 state->acquired_refs--; in release_reference_state()
804 return -EINVAL; in release_reference_state()
810 int err = realloc_reference_state(dst, src->acquired_refs, false); in transfer_reference_state()
823 kfree(state->refs); in free_func_state()
824 kfree(state->stack); in free_func_state()
830 kfree(state->jmp_history); in clear_jmp_history()
831 state->jmp_history = NULL; in clear_jmp_history()
832 state->jmp_history_cnt = 0; in clear_jmp_history()
840 for (i = 0; i <= state->curframe; i++) { in free_verifier_state()
841 free_func_state(state->frame[i]); in free_verifier_state()
842 state->frame[i] = NULL; in free_verifier_state()
857 err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs, in copy_func_state()
872 u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt; in copy_verifier_state()
875 if (dst_state->jmp_history_cnt < src->jmp_history_cnt) { in copy_verifier_state()
876 kfree(dst_state->jmp_history); in copy_verifier_state()
877 dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER); in copy_verifier_state()
878 if (!dst_state->jmp_history) in copy_verifier_state()
879 return -ENOMEM; in copy_verifier_state()
881 memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz); in copy_verifier_state()
882 dst_state->jmp_history_cnt = src->jmp_history_cnt; in copy_verifier_state()
885 for (i = src->curframe + 1; i <= dst_state->curframe; i++) { in copy_verifier_state()
886 free_func_state(dst_state->frame[i]); in copy_verifier_state()
887 dst_state->frame[i] = NULL; in copy_verifier_state()
889 dst_state->speculative = src->speculative; in copy_verifier_state()
890 dst_state->curframe = src->curframe; in copy_verifier_state()
891 dst_state->active_spin_lock = src->active_spin_lock; in copy_verifier_state()
892 dst_state->branches = src->branches; in copy_verifier_state()
893 dst_state->parent = src->parent; in copy_verifier_state()
894 dst_state->first_insn_idx = src->first_insn_idx; in copy_verifier_state()
895 dst_state->last_insn_idx = src->last_insn_idx; in copy_verifier_state()
896 for (i = 0; i <= src->curframe; i++) { in copy_verifier_state()
897 dst = dst_state->frame[i]; in copy_verifier_state()
901 return -ENOMEM; in copy_verifier_state()
902 dst_state->frame[i] = dst; in copy_verifier_state()
904 err = copy_func_state(dst, src->frame[i]); in copy_verifier_state()
914 u32 br = --st->branches; in update_branch_counts()
924 st = st->parent; in update_branch_counts()
931 struct bpf_verifier_state *cur = env->cur_state; in pop_stack()
932 struct bpf_verifier_stack_elem *elem, *head = env->head; in pop_stack()
935 if (env->head == NULL) in pop_stack()
936 return -ENOENT; in pop_stack()
939 err = copy_verifier_state(cur, &head->st); in pop_stack()
944 bpf_vlog_reset(&env->log, head->log_pos); in pop_stack()
946 *insn_idx = head->insn_idx; in pop_stack()
948 *prev_insn_idx = head->prev_insn_idx; in pop_stack()
949 elem = head->next; in pop_stack()
950 free_verifier_state(&head->st, false); in pop_stack()
952 env->head = elem; in pop_stack()
953 env->stack_size--; in pop_stack()
961 struct bpf_verifier_state *cur = env->cur_state; in push_stack()
969 elem->insn_idx = insn_idx; in push_stack()
970 elem->prev_insn_idx = prev_insn_idx; in push_stack()
971 elem->next = env->head; in push_stack()
972 elem->log_pos = env->log.len_used; in push_stack()
973 env->head = elem; in push_stack()
974 env->stack_size++; in push_stack()
975 err = copy_verifier_state(&elem->st, cur); in push_stack()
978 elem->st.speculative |= speculative; in push_stack()
979 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { in push_stack()
981 env->stack_size); in push_stack()
984 if (elem->st.parent) { in push_stack()
985 ++elem->st.parent->branches; in push_stack()
988 * 1. speculative states will bump 'branches' for non-branch in push_stack()
996 return &elem->st; in push_stack()
998 free_verifier_state(env->cur_state, true); in push_stack()
999 env->cur_state = NULL; in push_stack()
1011 struct bpf_reg_state *reg);
1013 /* This helper doesn't clear reg->id */
1014 static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm) in ___mark_reg_known() argument
1016 reg->var_off = tnum_const(imm); in ___mark_reg_known()
1017 reg->smin_value = (s64)imm; in ___mark_reg_known()
1018 reg->smax_value = (s64)imm; in ___mark_reg_known()
1019 reg->umin_value = imm; in ___mark_reg_known()
1020 reg->umax_value = imm; in ___mark_reg_known()
1022 reg->s32_min_value = (s32)imm; in ___mark_reg_known()
1023 reg->s32_max_value = (s32)imm; in ___mark_reg_known()
1024 reg->u32_min_value = (u32)imm; in ___mark_reg_known()
1025 reg->u32_max_value = (u32)imm; in ___mark_reg_known()
1031 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) in __mark_reg_known() argument
1034 memset(((u8 *)reg) + sizeof(reg->type), 0, in __mark_reg_known()
1035 offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type)); in __mark_reg_known()
1036 ___mark_reg_known(reg, imm); in __mark_reg_known()
1039 static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm) in __mark_reg32_known() argument
1041 reg->var_off = tnum_const_subreg(reg->var_off, imm); in __mark_reg32_known()
1042 reg->s32_min_value = (s32)imm; in __mark_reg32_known()
1043 reg->s32_max_value = (s32)imm; in __mark_reg32_known()
1044 reg->u32_min_value = (u32)imm; in __mark_reg32_known()
1045 reg->u32_max_value = (u32)imm; in __mark_reg32_known()
1051 static void __mark_reg_known_zero(struct bpf_reg_state *reg) in __mark_reg_known_zero() argument
1053 __mark_reg_known(reg, 0); in __mark_reg_known_zero()
1056 static void __mark_reg_const_zero(struct bpf_reg_state *reg) in __mark_reg_const_zero() argument
1058 __mark_reg_known(reg, 0); in __mark_reg_const_zero()
1059 reg->type = SCALAR_VALUE; in __mark_reg_const_zero()
1075 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) in reg_is_pkt_pointer() argument
1077 return type_is_pkt_pointer(reg->type); in reg_is_pkt_pointer()
1080 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) in reg_is_pkt_pointer_any() argument
1082 return reg_is_pkt_pointer(reg) || in reg_is_pkt_pointer_any()
1083 reg->type == PTR_TO_PACKET_END; in reg_is_pkt_pointer_any()
1087 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, in reg_is_init_pkt_pointer() argument
1094 return reg->type == which && in reg_is_init_pkt_pointer()
1095 reg->id == 0 && in reg_is_init_pkt_pointer()
1096 reg->off == 0 && in reg_is_init_pkt_pointer()
1097 tnum_equals_const(reg->var_off, 0); in reg_is_init_pkt_pointer()
1101 static void __mark_reg_unbounded(struct bpf_reg_state *reg) in __mark_reg_unbounded() argument
1103 reg->smin_value = S64_MIN; in __mark_reg_unbounded()
1104 reg->smax_value = S64_MAX; in __mark_reg_unbounded()
1105 reg->umin_value = 0; in __mark_reg_unbounded()
1106 reg->umax_value = U64_MAX; in __mark_reg_unbounded()
1108 reg->s32_min_value = S32_MIN; in __mark_reg_unbounded()
1109 reg->s32_max_value = S32_MAX; in __mark_reg_unbounded()
1110 reg->u32_min_value = 0; in __mark_reg_unbounded()
1111 reg->u32_max_value = U32_MAX; in __mark_reg_unbounded()
1114 static void __mark_reg64_unbounded(struct bpf_reg_state *reg) in __mark_reg64_unbounded() argument
1116 reg->smin_value = S64_MIN; in __mark_reg64_unbounded()
1117 reg->smax_value = S64_MAX; in __mark_reg64_unbounded()
1118 reg->umin_value = 0; in __mark_reg64_unbounded()
1119 reg->umax_value = U64_MAX; in __mark_reg64_unbounded()
1122 static void __mark_reg32_unbounded(struct bpf_reg_state *reg) in __mark_reg32_unbounded() argument
1124 reg->s32_min_value = S32_MIN; in __mark_reg32_unbounded()
1125 reg->s32_max_value = S32_MAX; in __mark_reg32_unbounded()
1126 reg->u32_min_value = 0; in __mark_reg32_unbounded()
1127 reg->u32_max_value = U32_MAX; in __mark_reg32_unbounded()
1130 static void __update_reg32_bounds(struct bpf_reg_state *reg) in __update_reg32_bounds() argument
1132 struct tnum var32_off = tnum_subreg(reg->var_off); in __update_reg32_bounds()
1135 reg->s32_min_value = max_t(s32, reg->s32_min_value, in __update_reg32_bounds()
1138 reg->s32_max_value = min_t(s32, reg->s32_max_value, in __update_reg32_bounds()
1140 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value); in __update_reg32_bounds()
1141 reg->u32_max_value = min(reg->u32_max_value, in __update_reg32_bounds()
1145 static void __update_reg64_bounds(struct bpf_reg_state *reg) in __update_reg64_bounds() argument
1148 reg->smin_value = max_t(s64, reg->smin_value, in __update_reg64_bounds()
1149 reg->var_off.value | (reg->var_off.mask & S64_MIN)); in __update_reg64_bounds()
1151 reg->smax_value = min_t(s64, reg->smax_value, in __update_reg64_bounds()
1152 reg->var_off.value | (reg->var_off.mask & S64_MAX)); in __update_reg64_bounds()
1153 reg->umin_value = max(reg->umin_value, reg->var_off.value); in __update_reg64_bounds()
1154 reg->umax_value = min(reg->umax_value, in __update_reg64_bounds()
1155 reg->var_off.value | reg->var_off.mask); in __update_reg64_bounds()
1158 static void __update_reg_bounds(struct bpf_reg_state *reg) in __update_reg_bounds() argument
1160 __update_reg32_bounds(reg); in __update_reg_bounds()
1161 __update_reg64_bounds(reg); in __update_reg_bounds()
1164 /* Uses signed min/max values to inform unsigned, and vice-versa */
1165 static void __reg32_deduce_bounds(struct bpf_reg_state *reg) in __reg32_deduce_bounds() argument
1170 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. in __reg32_deduce_bounds()
1172 if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) { in __reg32_deduce_bounds()
1173 reg->s32_min_value = reg->u32_min_value = in __reg32_deduce_bounds()
1174 max_t(u32, reg->s32_min_value, reg->u32_min_value); in __reg32_deduce_bounds()
1175 reg->s32_max_value = reg->u32_max_value = in __reg32_deduce_bounds()
1176 min_t(u32, reg->s32_max_value, reg->u32_max_value); in __reg32_deduce_bounds()
1182 if ((s32)reg->u32_max_value >= 0) { in __reg32_deduce_bounds()
1186 reg->s32_min_value = reg->u32_min_value; in __reg32_deduce_bounds()
1187 reg->s32_max_value = reg->u32_max_value = in __reg32_deduce_bounds()
1188 min_t(u32, reg->s32_max_value, reg->u32_max_value); in __reg32_deduce_bounds()
1189 } else if ((s32)reg->u32_min_value < 0) { in __reg32_deduce_bounds()
1193 reg->s32_min_value = reg->u32_min_value = in __reg32_deduce_bounds()
1194 max_t(u32, reg->s32_min_value, reg->u32_min_value); in __reg32_deduce_bounds()
1195 reg->s32_max_value = reg->u32_max_value; in __reg32_deduce_bounds()
1199 static void __reg64_deduce_bounds(struct bpf_reg_state *reg) in __reg64_deduce_bounds() argument
1204 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. in __reg64_deduce_bounds()
1206 if (reg->smin_value >= 0 || reg->smax_value < 0) { in __reg64_deduce_bounds()
1207 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, in __reg64_deduce_bounds()
1208 reg->umin_value); in __reg64_deduce_bounds()
1209 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, in __reg64_deduce_bounds()
1210 reg->umax_value); in __reg64_deduce_bounds()
1216 if ((s64)reg->umax_value >= 0) { in __reg64_deduce_bounds()
1220 reg->smin_value = reg->umin_value; in __reg64_deduce_bounds()
1221 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, in __reg64_deduce_bounds()
1222 reg->umax_value); in __reg64_deduce_bounds()
1223 } else if ((s64)reg->umin_value < 0) { in __reg64_deduce_bounds()
1227 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, in __reg64_deduce_bounds()
1228 reg->umin_value); in __reg64_deduce_bounds()
1229 reg->smax_value = reg->umax_value; in __reg64_deduce_bounds()
1233 static void __reg_deduce_bounds(struct bpf_reg_state *reg) in __reg_deduce_bounds() argument
1235 __reg32_deduce_bounds(reg); in __reg_deduce_bounds()
1236 __reg64_deduce_bounds(reg); in __reg_deduce_bounds()
1240 static void __reg_bound_offset(struct bpf_reg_state *reg) in __reg_bound_offset() argument
1242 struct tnum var64_off = tnum_intersect(reg->var_off, in __reg_bound_offset()
1243 tnum_range(reg->umin_value, in __reg_bound_offset()
1244 reg->umax_value)); in __reg_bound_offset()
1245 struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off), in __reg_bound_offset()
1246 tnum_range(reg->u32_min_value, in __reg_bound_offset()
1247 reg->u32_max_value)); in __reg_bound_offset()
1249 reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off); in __reg_bound_offset()
1252 static void __reg_assign_32_into_64(struct bpf_reg_state *reg) in __reg_assign_32_into_64() argument
1254 reg->umin_value = reg->u32_min_value; in __reg_assign_32_into_64()
1255 reg->umax_value = reg->u32_max_value; in __reg_assign_32_into_64()
1256 /* Attempt to pull 32-bit signed bounds into 64-bit bounds in __reg_assign_32_into_64()
1260 if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0) in __reg_assign_32_into_64()
1261 reg->smax_value = reg->s32_max_value; in __reg_assign_32_into_64()
1263 reg->smax_value = U32_MAX; in __reg_assign_32_into_64()
1264 if (reg->s32_min_value >= 0) in __reg_assign_32_into_64()
1265 reg->smin_value = reg->s32_min_value; in __reg_assign_32_into_64()
1267 reg->smin_value = 0; in __reg_assign_32_into_64()
1270 static void __reg_combine_32_into_64(struct bpf_reg_state *reg) in __reg_combine_32_into_64() argument
1272 /* special case when 64-bit register has upper 32-bit register in __reg_combine_32_into_64()
1274 * allowing us to use 32-bit bounds directly, in __reg_combine_32_into_64()
1276 if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) { in __reg_combine_32_into_64()
1277 __reg_assign_32_into_64(reg); in __reg_combine_32_into_64()
1281 * then learn as much as possible from the 64-bit tnum in __reg_combine_32_into_64()
1286 __mark_reg64_unbounded(reg); in __reg_combine_32_into_64()
1287 __update_reg_bounds(reg); in __reg_combine_32_into_64()
1294 __reg_deduce_bounds(reg); in __reg_combine_32_into_64()
1295 __reg_bound_offset(reg); in __reg_combine_32_into_64()
1296 __update_reg_bounds(reg); in __reg_combine_32_into_64()
1311 static void __reg_combine_64_into_32(struct bpf_reg_state *reg) in __reg_combine_64_into_32() argument
1313 __mark_reg32_unbounded(reg); in __reg_combine_64_into_32()
1315 if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) { in __reg_combine_64_into_32()
1316 reg->s32_min_value = (s32)reg->smin_value; in __reg_combine_64_into_32()
1317 reg->s32_max_value = (s32)reg->smax_value; in __reg_combine_64_into_32()
1319 if (__reg64_bound_u32(reg->umin_value)) in __reg_combine_64_into_32()
1320 reg->u32_min_value = (u32)reg->umin_value; in __reg_combine_64_into_32()
1321 if (__reg64_bound_u32(reg->umax_value)) in __reg_combine_64_into_32()
1322 reg->u32_max_value = (u32)reg->umax_value; in __reg_combine_64_into_32()
1328 __reg_deduce_bounds(reg); in __reg_combine_64_into_32()
1329 __reg_bound_offset(reg); in __reg_combine_64_into_32()
1330 __update_reg_bounds(reg); in __reg_combine_64_into_32()
1335 struct bpf_reg_state *reg) in __mark_reg_unknown() argument
1341 memset(reg, 0, offsetof(struct bpf_reg_state, var_off)); in __mark_reg_unknown()
1342 reg->type = SCALAR_VALUE; in __mark_reg_unknown()
1343 reg->var_off = tnum_unknown; in __mark_reg_unknown()
1344 reg->frameno = 0; in __mark_reg_unknown()
1345 reg->precise = env->subprog_cnt > 1 || !env->bpf_capable; in __mark_reg_unknown()
1346 __mark_reg_unbounded(reg); in __mark_reg_unknown()
1363 struct bpf_reg_state *reg) in __mark_reg_not_init() argument
1365 __mark_reg_unknown(env, reg); in __mark_reg_not_init()
1366 reg->type = NOT_INIT; in __mark_reg_not_init()
1399 struct bpf_reg_state *regs = state->regs; in init_reg_state()
1412 regs[BPF_REG_FP].frameno = state->frameno; in init_reg_state()
1415 #define BPF_MAIN_FUNC (-1)
1420 state->callsite = callsite; in init_func_state()
1421 state->frameno = frameno; in init_func_state()
1422 state->subprogno = subprogno; in init_func_state()
1434 return ((struct bpf_subprog_info *)a)->start - in cmp_subprogs()
1435 ((struct bpf_subprog_info *)b)->start; in cmp_subprogs()
1442 p = bsearch(&off, env->subprog_info, env->subprog_cnt, in find_subprog()
1443 sizeof(env->subprog_info[0]), cmp_subprogs); in find_subprog()
1445 return -ENOENT; in find_subprog()
1446 return p - env->subprog_info; in find_subprog()
1452 int insn_cnt = env->prog->len; in add_subprog()
1457 return -EINVAL; in add_subprog()
1462 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { in add_subprog()
1464 return -E2BIG; in add_subprog()
1466 env->subprog_info[env->subprog_cnt++].start = off; in add_subprog()
1467 sort(env->subprog_info, env->subprog_cnt, in add_subprog()
1468 sizeof(env->subprog_info[0]), cmp_subprogs, NULL); in add_subprog()
1475 struct bpf_subprog_info *subprog = env->subprog_info; in check_subprogs()
1476 struct bpf_insn *insn = env->prog->insnsi; in check_subprogs()
1477 int insn_cnt = env->prog->len; in check_subprogs()
1490 if (!env->bpf_capable) { in check_subprogs()
1493 return -EPERM; in check_subprogs()
1503 subprog[env->subprog_cnt].start = insn_cnt; in check_subprogs()
1505 if (env->log.level & BPF_LOG_LEVEL2) in check_subprogs()
1506 for (i = 0; i < env->subprog_cnt; i++) in check_subprogs()
1529 return -EINVAL; in check_subprogs()
1532 if (i == subprog_end - 1) { in check_subprogs()
1533 /* to avoid fall-through from one subprog into another in check_subprogs()
1540 return -EINVAL; in check_subprogs()
1544 if (cur_subprog < env->subprog_cnt) in check_subprogs()
1552 * issues like callee-saved registers, stack slot allocation time, etc.
1558 bool writes = parent == state->parent; /* Observe write marks */ in mark_reg_read()
1563 if (writes && state->live & REG_LIVE_WRITTEN) in mark_reg_read()
1565 if (parent->live & REG_LIVE_DONE) { in mark_reg_read()
1567 reg_type_str[parent->type], in mark_reg_read()
1568 parent->var_off.value, parent->off); in mark_reg_read()
1569 return -EFAULT; in mark_reg_read()
1574 if ((parent->live & REG_LIVE_READ) == flag || in mark_reg_read()
1575 parent->live & REG_LIVE_READ64) in mark_reg_read()
1579 * keep re-marking all parents as LIVE_READ. in mark_reg_read()
1581 * multiple times without writes into it in-between. in mark_reg_read()
1587 parent->live |= flag; in mark_reg_read()
1590 parent->live &= ~REG_LIVE_READ32; in mark_reg_read()
1592 parent = state->parent; in mark_reg_read()
1597 if (env->longest_mark_read_walk < cnt) in mark_reg_read()
1598 env->longest_mark_read_walk = cnt; in mark_reg_read()
1602 /* This function is supposed to be used by the following 32-bit optimization
1604 * on 64-bit, otherwise return FALSE.
1607 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t) in is_reg64() argument
1611 code = insn->code; in is_reg64()
1626 if (insn->src_reg == BPF_PSEUDO_CALL) in is_reg64()
1640 (class == BPF_ALU && op == BPF_END && insn->imm == 64)) in is_reg64()
1654 if (reg->type != SCALAR_VALUE) in is_reg64()
1666 /* Both LD_IND and LD_ABS return 32-bit data. */ in is_reg64()
1689 u8 class = BPF_CLASS(insn->code); in insn_no_def()
1695 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
1701 return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP); in insn_has_def32()
1705 struct bpf_reg_state *reg) in mark_insn_zext() argument
1707 s32 def_idx = reg->subreg_def; in mark_insn_zext()
1712 env->insn_aux_data[def_idx - 1].zext_dst = true; in mark_insn_zext()
1713 /* The dst will be zero extended, so won't be sub-register anymore. */ in mark_insn_zext()
1714 reg->subreg_def = DEF_NOT_SUBREG; in mark_insn_zext()
1720 struct bpf_verifier_state *vstate = env->cur_state; in check_reg_arg()
1721 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_reg_arg()
1722 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; in check_reg_arg()
1723 struct bpf_reg_state *reg, *regs = state->regs; in check_reg_arg() local
1728 return -EINVAL; in check_reg_arg()
1731 reg = &regs[regno]; in check_reg_arg()
1732 rw64 = is_reg64(env, insn, regno, reg, t); in check_reg_arg()
1735 if (reg->type == NOT_INIT) { in check_reg_arg()
1737 return -EACCES; in check_reg_arg()
1739 /* We don't need to worry about FP liveness because it's read-only */ in check_reg_arg()
1744 mark_insn_zext(env, reg); in check_reg_arg()
1746 return mark_reg_read(env, reg, reg->parent, in check_reg_arg()
1752 return -EACCES; in check_reg_arg()
1754 reg->live |= REG_LIVE_WRITTEN; in check_reg_arg()
1755 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1; in check_reg_arg()
1766 u32 cnt = cur->jmp_history_cnt; in push_jmp_history()
1770 p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER); in push_jmp_history()
1772 return -ENOMEM; in push_jmp_history()
1773 p[cnt - 1].idx = env->insn_idx; in push_jmp_history()
1774 p[cnt - 1].prev_idx = env->prev_insn_idx; in push_jmp_history()
1775 cur->jmp_history = p; in push_jmp_history()
1776 cur->jmp_history_cnt = cnt; in push_jmp_history()
1788 if (cnt && st->jmp_history[cnt - 1].idx == i) { in get_prev_insn_idx()
1789 i = st->jmp_history[cnt - 1].prev_idx; in get_prev_insn_idx()
1790 (*history)--; in get_prev_insn_idx()
1792 i--; in get_prev_insn_idx()
1808 struct bpf_insn *insn = env->prog->insnsi + idx; in backtrack_insn()
1809 u8 class = BPF_CLASS(insn->code); in backtrack_insn()
1810 u8 opcode = BPF_OP(insn->code); in backtrack_insn()
1811 u8 mode = BPF_MODE(insn->code); in backtrack_insn()
1812 u32 dreg = 1u << insn->dst_reg; in backtrack_insn()
1813 u32 sreg = 1u << insn->src_reg; in backtrack_insn()
1816 if (insn->code == 0) in backtrack_insn()
1818 if (env->log.level & BPF_LOG_LEVEL) { in backtrack_insn()
1821 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); in backtrack_insn()
1828 if (BPF_SRC(insn->code) == BPF_X) { in backtrack_insn()
1845 if (BPF_SRC(insn->code) == BPF_X) { in backtrack_insn()
1866 if (insn->src_reg != BPF_REG_FP) in backtrack_insn()
1868 if (BPF_SIZE(insn->code) != BPF_DW) in backtrack_insn()
1871 /* dreg = *(u64 *)[fp - off] was a fill from the stack. in backtrack_insn()
1872 * that [fp - off] slot contains scalar that needs to be in backtrack_insn()
1875 spi = (-insn->off - 1) / BPF_REG_SIZE; in backtrack_insn()
1879 return -EFAULT; in backtrack_insn()
1888 return -ENOTSUPP; in backtrack_insn()
1890 if (insn->dst_reg != BPF_REG_FP) in backtrack_insn()
1892 if (BPF_SIZE(insn->code) != BPF_DW) in backtrack_insn()
1894 spi = (-insn->off - 1) / BPF_REG_SIZE; in backtrack_insn()
1898 return -EFAULT; in backtrack_insn()
1907 if (insn->src_reg == BPF_PSEUDO_CALL) in backtrack_insn()
1908 return -ENOTSUPP; in backtrack_insn()
1912 /* if backtracing was looking for registers R1-R5 in backtrack_insn()
1917 return -EFAULT; in backtrack_insn()
1920 return -ENOTSUPP; in backtrack_insn()
1932 return -ENOTSUPP; in backtrack_insn()
1965 * r9 -= r8
1993 struct bpf_reg_state *reg; in mark_all_scalars_precise() local
1999 for (; st; st = st->parent) in mark_all_scalars_precise()
2000 for (i = 0; i <= st->curframe; i++) { in mark_all_scalars_precise()
2001 func = st->frame[i]; in mark_all_scalars_precise()
2003 reg = &func->regs[j]; in mark_all_scalars_precise()
2004 if (reg->type != SCALAR_VALUE) in mark_all_scalars_precise()
2006 reg->precise = true; in mark_all_scalars_precise()
2008 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_precise()
2009 if (func->stack[j].slot_type[0] != STACK_SPILL) in mark_all_scalars_precise()
2011 reg = &func->stack[j].spilled_ptr; in mark_all_scalars_precise()
2012 if (reg->type != SCALAR_VALUE) in mark_all_scalars_precise()
2014 reg->precise = true; in mark_all_scalars_precise()
2022 struct bpf_verifier_state *st = env->cur_state; in __mark_chain_precision()
2023 int first_idx = st->first_insn_idx; in __mark_chain_precision()
2024 int last_idx = env->insn_idx; in __mark_chain_precision()
2026 struct bpf_reg_state *reg; in __mark_chain_precision() local
2033 if (!env->bpf_capable) in __mark_chain_precision()
2036 func = st->frame[st->curframe]; in __mark_chain_precision()
2038 reg = &func->regs[regno]; in __mark_chain_precision()
2039 if (reg->type != SCALAR_VALUE) { in __mark_chain_precision()
2041 return -EFAULT; in __mark_chain_precision()
2043 if (!reg->precise) in __mark_chain_precision()
2047 reg->precise = true; in __mark_chain_precision()
2051 if (func->stack[spi].slot_type[0] != STACK_SPILL) { in __mark_chain_precision()
2055 reg = &func->stack[spi].spilled_ptr; in __mark_chain_precision()
2056 if (reg->type != SCALAR_VALUE) { in __mark_chain_precision()
2060 if (!reg->precise) in __mark_chain_precision()
2064 reg->precise = true; in __mark_chain_precision()
2074 u32 history = st->jmp_history_cnt; in __mark_chain_precision()
2076 if (env->log.level & BPF_LOG_LEVEL) in __mark_chain_precision()
2085 if (err == -ENOTSUPP) { in __mark_chain_precision()
2100 if (i >= env->prog->len) { in __mark_chain_precision()
2109 return -EFAULT; in __mark_chain_precision()
2112 st = st->parent; in __mark_chain_precision()
2117 func = st->frame[st->curframe]; in __mark_chain_precision()
2120 reg = &func->regs[i]; in __mark_chain_precision()
2121 if (reg->type != SCALAR_VALUE) { in __mark_chain_precision()
2125 if (!reg->precise) in __mark_chain_precision()
2127 reg->precise = true; in __mark_chain_precision()
2132 if (i >= func->allocated_stack / BPF_REG_SIZE) { in __mark_chain_precision()
2135 * 3: (7b) *(u64 *)(r3 -8) = r0 in __mark_chain_precision()
2136 * 4: (79) r4 = *(u64 *)(r10 -8) in __mark_chain_precision()
2141 * stack slot fp-8 is still marked in stack_mask. in __mark_chain_precision()
2143 * fp-8 and it's "unallocated" stack space. in __mark_chain_precision()
2150 if (func->stack[i].slot_type[0] != STACK_SPILL) { in __mark_chain_precision()
2154 reg = &func->stack[i].spilled_ptr; in __mark_chain_precision()
2155 if (reg->type != SCALAR_VALUE) { in __mark_chain_precision()
2159 if (!reg->precise) in __mark_chain_precision()
2161 reg->precise = true; in __mark_chain_precision()
2163 if (env->log.level & BPF_LOG_LEVEL) { in __mark_chain_precision()
2175 last_idx = st->last_insn_idx; in __mark_chain_precision()
2176 first_idx = st->first_insn_idx; in __mark_chain_precision()
2183 return __mark_chain_precision(env, regno, -1); in mark_chain_precision()
2188 return __mark_chain_precision(env, -1, spi); in mark_chain_precision_stack()
2224 static bool register_is_null(struct bpf_reg_state *reg) in register_is_null() argument
2226 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); in register_is_null()
2229 static bool register_is_const(struct bpf_reg_state *reg) in register_is_const() argument
2231 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off); in register_is_const()
2234 static bool __is_scalar_unbounded(struct bpf_reg_state *reg) in __is_scalar_unbounded() argument
2236 return tnum_is_unknown(reg->var_off) && in __is_scalar_unbounded()
2237 reg->smin_value == S64_MIN && reg->smax_value == S64_MAX && in __is_scalar_unbounded()
2238 reg->umin_value == 0 && reg->umax_value == U64_MAX && in __is_scalar_unbounded()
2239 reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX && in __is_scalar_unbounded()
2240 reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX; in __is_scalar_unbounded()
2243 static bool register_is_bounded(struct bpf_reg_state *reg) in register_is_bounded() argument
2245 return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg); in register_is_bounded()
2249 const struct bpf_reg_state *reg) in __is_pointer_value() argument
2254 return reg->type != SCALAR_VALUE; in __is_pointer_value()
2258 int spi, struct bpf_reg_state *reg) in save_register_state() argument
2262 state->stack[spi].spilled_ptr = *reg; in save_register_state()
2263 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in save_register_state()
2266 state->stack[spi].slot_type[i] = STACK_SPILL; in save_register_state()
2277 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; in check_stack_write()
2278 u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg; in check_stack_write()
2279 struct bpf_reg_state *reg = NULL; in check_stack_write() local
2282 state->acquired_refs, true); in check_stack_write()
2285 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, in check_stack_write()
2288 if (!env->allow_ptr_leaks && in check_stack_write()
2289 state->stack[spi].slot_type[0] == STACK_SPILL && in check_stack_write()
2292 return -EACCES; in check_stack_write()
2295 cur = env->cur_state->frame[env->cur_state->curframe]; in check_stack_write()
2297 reg = &cur->regs[value_regno]; in check_stack_write()
2299 if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) && in check_stack_write()
2300 !register_is_null(reg) && env->bpf_capable) { in check_stack_write()
2303 * stack slot address like [fp - 8]. Other spill of in check_stack_write()
2306 * that contributed into 'reg' being a constant. in check_stack_write()
2312 save_register_state(state, spi, reg); in check_stack_write()
2313 } else if (reg && is_spillable_regtype(reg->type)) { in check_stack_write()
2318 return -EACCES; in check_stack_write()
2321 if (state != cur && reg->type == PTR_TO_STACK) { in check_stack_write()
2323 return -EINVAL; in check_stack_write()
2326 if (!env->bypass_spec_v4) { in check_stack_write()
2329 if (state->stack[spi].slot_type[0] == STACK_SPILL && in check_stack_write()
2330 register_is_const(&state->stack[spi].spilled_ptr)) in check_stack_write()
2333 if (state->stack[spi].slot_type[i] == STACK_MISC) { in check_stack_write()
2338 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off; in check_stack_write()
2339 int soff = (-spi - 1) * BPF_REG_SIZE; in check_stack_write()
2343 * an attacker is trying to exploit CVE-2018-3639 in check_stack_write()
2356 return -EINVAL; in check_stack_write()
2361 save_register_state(state, spi, reg); in check_stack_write()
2366 state->stack[spi].spilled_ptr.type = NOT_INIT; in check_stack_write()
2368 if (state->stack[spi].slot_type[0] == STACK_SPILL) in check_stack_write()
2370 state->stack[spi].slot_type[i] = STACK_MISC; in check_stack_write()
2381 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in check_stack_write()
2384 if (reg && register_is_null(reg)) { in check_stack_write()
2394 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = in check_stack_write()
2404 struct bpf_verifier_state *vstate = env->cur_state; in check_stack_read()
2405 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_stack_read()
2406 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; in check_stack_read()
2407 struct bpf_reg_state *reg; in check_stack_read() local
2410 if (reg_state->allocated_stack <= slot) { in check_stack_read()
2413 return -EACCES; in check_stack_read()
2415 stype = reg_state->stack[spi].slot_type; in check_stack_read()
2416 reg = &reg_state->stack[spi].spilled_ptr; in check_stack_read()
2420 if (reg->type != SCALAR_VALUE) { in check_stack_read()
2421 verbose_linfo(env, env->insn_idx, "; "); in check_stack_read()
2423 return -EACCES; in check_stack_read()
2426 mark_reg_unknown(env, state->regs, value_regno); in check_stack_read()
2427 state->regs[value_regno].live |= REG_LIVE_WRITTEN; in check_stack_read()
2429 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read()
2433 if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { in check_stack_read()
2435 return -EACCES; in check_stack_read()
2441 state->regs[value_regno] = *reg; in check_stack_read()
2442 /* mark reg as written since spilled pointer state likely in check_stack_read()
2444 * which resets stack/reg liveness for state transitions in check_stack_read()
2446 state->regs[value_regno].live |= REG_LIVE_WRITTEN; in check_stack_read()
2447 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) { in check_stack_read()
2448 /* If value_regno==-1, the caller is asking us whether in check_stack_read()
2456 return -EACCES; in check_stack_read()
2458 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read()
2463 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC) in check_stack_read()
2465 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) { in check_stack_read()
2471 return -EACCES; in check_stack_read()
2473 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read()
2479 __mark_reg_const_zero(&state->regs[value_regno]); in check_stack_read()
2490 state->regs[value_regno].precise = true; in check_stack_read()
2493 mark_reg_unknown(env, state->regs, value_regno); in check_stack_read()
2495 state->regs[value_regno].live |= REG_LIVE_WRITTEN; in check_stack_read()
2502 const struct bpf_reg_state *reg, in check_stack_access() argument
2509 if (!tnum_is_const(reg->var_off)) { in check_stack_access()
2512 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_access()
2515 return -EACCES; in check_stack_access()
2518 if (off >= 0 || off < -MAX_BPF_STACK) { in check_stack_access()
2520 return -EACCES; in check_stack_access()
2535 map->value_size, off, size); in check_map_access_type()
2536 return -EACCES; in check_map_access_type()
2541 map->value_size, off, size); in check_map_access_type()
2542 return -EACCES; in check_map_access_type()
2554 struct bpf_reg_state *reg; in __check_mem_access() local
2559 reg = &cur_regs(env)[regno]; in __check_mem_access()
2560 switch (reg->type) { in __check_mem_access()
2569 off, size, regno, reg->id, off, mem_size); in __check_mem_access()
2577 return -EACCES; in __check_mem_access()
2585 struct bpf_verifier_state *vstate = env->cur_state; in check_mem_region_access()
2586 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_mem_region_access()
2587 struct bpf_reg_state *reg = &state->regs[regno]; in check_mem_region_access() local
2594 if (env->log.level & BPF_LOG_LEVEL) in check_mem_region_access()
2603 if (reg->smin_value < 0 && in check_mem_region_access()
2604 (reg->smin_value == S64_MIN || in check_mem_region_access()
2605 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || in check_mem_region_access()
2606 reg->smin_value + off < 0)) { in check_mem_region_access()
2609 return -EACCES; in check_mem_region_access()
2611 err = __check_mem_access(env, regno, reg->smin_value + off, size, in check_mem_region_access()
2621 * If reg->umax_value + off could overflow, treat that as unbounded too. in check_mem_region_access()
2623 if (reg->umax_value >= BPF_MAX_VAR_OFF) { in check_mem_region_access()
2626 return -EACCES; in check_mem_region_access()
2628 err = __check_mem_access(env, regno, reg->umax_value + off, size, in check_mem_region_access()
2643 struct bpf_verifier_state *vstate = env->cur_state; in check_map_access()
2644 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_map_access()
2645 struct bpf_reg_state *reg = &state->regs[regno]; in check_map_access() local
2646 struct bpf_map *map = reg->map_ptr; in check_map_access()
2649 err = check_mem_region_access(env, regno, off, size, map->value_size, in check_map_access()
2655 u32 lock = map->spin_lock_off; in check_map_access()
2662 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) && in check_map_access()
2663 lock < reg->umax_value + off + size) { in check_map_access()
2665 return -EACCES; in check_map_access()
2675 return prog->aux->dst_prog ? prog->aux->dst_prog->type : prog->type; in resolve_prog_type()
2682 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in may_access_direct_pkt_data()
2704 return meta->pkt_access; in may_access_direct_pkt_data()
2706 env->seen_direct_write = true; in may_access_direct_pkt_data()
2711 env->seen_direct_write = true; in may_access_direct_pkt_data()
2724 struct bpf_reg_state *reg = &regs[regno]; in check_packet_access() local
2728 * reg->range we have comes after that. We are only checking the fixed in check_packet_access()
2735 if (reg->smin_value < 0) { in check_packet_access()
2738 return -EACCES; in check_packet_access()
2740 err = __check_mem_access(env, regno, off, size, reg->range, in check_packet_access()
2747 /* __check_mem_access has made sure "off + size - 1" is within u16. in check_packet_access()
2748 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff, in check_packet_access()
2751 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. in check_packet_access()
2753 env->prog->aux->max_pkt_offset = in check_packet_access()
2754 max_t(u32, env->prog->aux->max_pkt_offset, in check_packet_access()
2755 off + reg->umax_value + size - 1); in check_packet_access()
2767 .log = &env->log, in check_ctx_access()
2770 if (env->ops->is_valid_access && in check_ctx_access()
2771 env->ops->is_valid_access(off, size, t, env->prog, &info)) { in check_ctx_access()
2784 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; in check_ctx_access()
2786 if (env->prog->aux->max_ctx_offset < off + size) in check_ctx_access()
2787 env->prog->aux->max_ctx_offset = off + size; in check_ctx_access()
2792 return -EACCES; in check_ctx_access()
2802 return -EACCES; in check_flow_keys_access()
2812 struct bpf_reg_state *reg = &regs[regno]; in check_sock_access() local
2816 if (reg->smin_value < 0) { in check_sock_access()
2819 return -EACCES; in check_sock_access()
2822 switch (reg->type) { in check_sock_access()
2841 env->insn_aux_data[insn_idx].ctx_field_size = in check_sock_access()
2847 regno, reg_type_str[reg->type], off, size); in check_sock_access()
2849 return -EACCES; in check_sock_access()
2859 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); in is_pointer_value()
2864 const struct bpf_reg_state *reg = reg_state(env, regno); in is_ctx_reg() local
2866 return reg->type == PTR_TO_CTX; in is_ctx_reg()
2871 const struct bpf_reg_state *reg = reg_state(env, regno); in is_sk_reg() local
2873 return type_is_sk_pointer(reg->type); in is_sk_reg()
2878 const struct bpf_reg_state *reg = reg_state(env, regno); in is_pkt_reg() local
2880 return type_is_pkt_pointer(reg->type); in is_pkt_reg()
2885 const struct bpf_reg_state *reg = reg_state(env, regno); in is_flow_key_reg() local
2888 return reg->type == PTR_TO_FLOW_KEYS; in is_flow_key_reg()
2892 const struct bpf_reg_state *reg, in check_pkt_ptr_alignment() argument
2912 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); in check_pkt_ptr_alignment()
2916 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_pkt_ptr_alignment()
2919 ip_align, tn_buf, reg->off, off, size); in check_pkt_ptr_alignment()
2920 return -EACCES; in check_pkt_ptr_alignment()
2927 const struct bpf_reg_state *reg, in check_generic_ptr_alignment() argument
2937 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); in check_generic_ptr_alignment()
2941 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_generic_ptr_alignment()
2943 pointer_desc, tn_buf, reg->off, off, size); in check_generic_ptr_alignment()
2944 return -EACCES; in check_generic_ptr_alignment()
2951 const struct bpf_reg_state *reg, int off, in check_ptr_alignment() argument
2954 bool strict = env->strict_alignment || strict_alignment_once; in check_ptr_alignment()
2957 switch (reg->type) { in check_ptr_alignment()
2963 return check_pkt_ptr_alignment(env, reg, off, size, strict); in check_ptr_alignment()
2996 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, in check_ptr_alignment()
3004 u16 stack = env->subprog_info[func->subprogno].stack_depth; in update_stack_depth()
3006 if (stack >= -off) in update_stack_depth()
3010 env->subprog_info[func->subprogno].stack_depth = -off; in update_stack_depth()
3023 struct bpf_subprog_info *subprog = env->subprog_info; in check_max_stack_depth()
3024 struct bpf_insn *insn = env->prog->insnsi; in check_max_stack_depth()
3038 * func1 -> sub rsp, 128 in check_max_stack_depth()
3039 * subfunc1 -> sub rsp, 256 in check_max_stack_depth()
3040 * tailcall1 -> add rsp, 256 in check_max_stack_depth()
3041 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320) in check_max_stack_depth()
3042 * subfunc2 -> sub rsp, 64 in check_max_stack_depth()
3043 * subfunc22 -> sub rsp, 128 in check_max_stack_depth()
3044 * tailcall2 -> add rsp, 128 in check_max_stack_depth()
3045 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416) in check_max_stack_depth()
3054 return -EACCES; in check_max_stack_depth()
3056 /* round up to 32-bytes, since this is granularity in check_max_stack_depth()
3063 return -EACCES; in check_max_stack_depth()
3082 return -EFAULT; in check_max_stack_depth()
3092 return -E2BIG; in check_max_stack_depth()
3110 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); in check_max_stack_depth()
3111 frame--; in check_max_stack_depth()
3121 int start = idx + insn->imm + 1, subprog; in get_callee_stack_depth()
3127 return -EFAULT; in get_callee_stack_depth()
3129 return env->subprog_info[subprog].stack_depth; in get_callee_stack_depth()
3134 const struct bpf_reg_state *reg, int regno) in check_ctx_reg() argument
3140 if (reg->off) { in check_ctx_reg()
3142 regno, reg->off); in check_ctx_reg()
3143 return -EACCES; in check_ctx_reg()
3146 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { in check_ctx_reg()
3149 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_ctx_reg()
3151 return -EACCES; in check_ctx_reg()
3159 const struct bpf_reg_state *reg, in __check_buffer_access() argument
3166 return -EACCES; in __check_buffer_access()
3168 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { in __check_buffer_access()
3171 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in __check_buffer_access()
3175 return -EACCES; in __check_buffer_access()
3182 const struct bpf_reg_state *reg, in check_tp_buffer_access() argument
3187 err = __check_buffer_access(env, "tracepoint", reg, regno, off, size); in check_tp_buffer_access()
3191 if (off + size > env->prog->aux->max_tp_access) in check_tp_buffer_access()
3192 env->prog->aux->max_tp_access = off + size; in check_tp_buffer_access()
3198 const struct bpf_reg_state *reg, in check_buffer_access() argument
3206 err = __check_buffer_access(env, buf_info, reg, regno, off, size); in check_buffer_access()
3216 /* BPF architecture zero extends alu32 ops into 64-bit registers */
3217 static void zext_32_to_64(struct bpf_reg_state *reg) in zext_32_to_64() argument
3219 reg->var_off = tnum_subreg(reg->var_off); in zext_32_to_64()
3220 __reg_assign_32_into_64(reg); in zext_32_to_64()
3226 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) in coerce_reg_to_size() argument
3231 reg->var_off = tnum_cast(reg->var_off, size); in coerce_reg_to_size()
3234 mask = ((u64)1 << (size * 8)) - 1; in coerce_reg_to_size()
3235 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { in coerce_reg_to_size()
3236 reg->umin_value &= mask; in coerce_reg_to_size()
3237 reg->umax_value &= mask; in coerce_reg_to_size()
3239 reg->umin_value = 0; in coerce_reg_to_size()
3240 reg->umax_value = mask; in coerce_reg_to_size()
3242 reg->smin_value = reg->umin_value; in coerce_reg_to_size()
3243 reg->smax_value = reg->umax_value; in coerce_reg_to_size()
3246 * values are also truncated so we push 64-bit bounds into in coerce_reg_to_size()
3247 * 32-bit bounds. Above were truncated < 32-bits already. in coerce_reg_to_size()
3251 __reg_combine_64_into_32(reg); in coerce_reg_to_size()
3256 return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen; in bpf_map_is_rdonly()
3265 err = map->ops->map_direct_value_addr(map, &addr, off); in bpf_map_direct_read()
3284 return -EINVAL; in bpf_map_direct_read()
3295 struct bpf_reg_state *reg = regs + regno; in check_ptr_to_btf_access() local
3296 const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id); in check_ptr_to_btf_access()
3297 const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off); in check_ptr_to_btf_access()
3305 return -EACCES; in check_ptr_to_btf_access()
3307 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { in check_ptr_to_btf_access()
3310 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_ptr_to_btf_access()
3314 return -EACCES; in check_ptr_to_btf_access()
3317 if (env->ops->btf_struct_access) { in check_ptr_to_btf_access()
3318 ret = env->ops->btf_struct_access(&env->log, t, off, size, in check_ptr_to_btf_access()
3323 return -EACCES; in check_ptr_to_btf_access()
3326 ret = btf_struct_access(&env->log, t, off, size, atype, in check_ptr_to_btf_access()
3345 struct bpf_reg_state *reg = regs + regno; in check_ptr_to_map_access() local
3346 struct bpf_map *map = reg->map_ptr; in check_ptr_to_map_access()
3354 return -ENOTSUPP; in check_ptr_to_map_access()
3357 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { in check_ptr_to_map_access()
3359 map->map_type); in check_ptr_to_map_access()
3360 return -ENOTSUPP; in check_ptr_to_map_access()
3363 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); in check_ptr_to_map_access()
3364 tname = btf_name_by_offset(btf_vmlinux, t->name_off); in check_ptr_to_map_access()
3366 if (!env->allow_ptr_to_map_access) { in check_ptr_to_map_access()
3370 return -EPERM; in check_ptr_to_map_access()
3376 return -EACCES; in check_ptr_to_map_access()
3381 return -EACCES; in check_ptr_to_map_access()
3384 ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id); in check_ptr_to_map_access()
3398 * if t==write && value_regno==-1, some unknown value is stored into memory
3399 * if t==read && value_regno==-1, don't care what we read from memory
3406 struct bpf_reg_state *reg = regs + regno; in check_mem_access() local
3414 /* alignment checks will add in reg->off themselves */ in check_mem_access()
3415 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); in check_mem_access()
3419 /* for access checks, reg->off is just part of off */ in check_mem_access()
3420 off += reg->off; in check_mem_access()
3422 if (reg->type == PTR_TO_MAP_VALUE) { in check_mem_access()
3426 return -EACCES; in check_mem_access()
3433 struct bpf_map *map = reg->map_ptr; in check_mem_access()
3435 /* if map is read-only, track its contents as scalars */ in check_mem_access()
3436 if (tnum_is_const(reg->var_off) && in check_mem_access()
3438 map->ops->map_direct_value_addr) { in check_mem_access()
3439 int map_off = off + reg->var_off.value; in check_mem_access()
3453 } else if (reg->type == PTR_TO_MEM) { in check_mem_access()
3457 return -EACCES; in check_mem_access()
3460 reg->mem_size, false); in check_mem_access()
3463 } else if (reg->type == PTR_TO_CTX) { in check_mem_access()
3470 return -EACCES; in check_mem_access()
3473 err = check_ctx_reg(env, reg, regno); in check_mem_access()
3491 regs[value_regno].id = ++env->id_gen; in check_mem_access()
3495 * a sub-register. in check_mem_access()
3505 } else if (reg->type == PTR_TO_STACK) { in check_mem_access()
3506 off += reg->var_off.value; in check_mem_access()
3507 err = check_stack_access(env, reg, off, size); in check_mem_access()
3511 state = func(env, reg); in check_mem_access()
3522 } else if (reg_is_pkt_pointer(reg)) { in check_mem_access()
3525 return -EACCES; in check_mem_access()
3531 return -EACCES; in check_mem_access()
3536 } else if (reg->type == PTR_TO_FLOW_KEYS) { in check_mem_access()
3541 return -EACCES; in check_mem_access()
3547 } else if (type_is_sk_pointer(reg->type)) { in check_mem_access()
3550 regno, reg_type_str[reg->type]); in check_mem_access()
3551 return -EACCES; in check_mem_access()
3556 } else if (reg->type == PTR_TO_TP_BUFFER) { in check_mem_access()
3557 err = check_tp_buffer_access(env, reg, regno, off, size); in check_mem_access()
3560 } else if (reg->type == PTR_TO_BTF_ID) { in check_mem_access()
3563 } else if (reg->type == CONST_PTR_TO_MAP) { in check_mem_access()
3566 } else if (reg->type == PTR_TO_RDONLY_BUF) { in check_mem_access()
3569 regno, reg_type_str[reg->type]); in check_mem_access()
3570 return -EACCES; in check_mem_access()
3572 err = check_buffer_access(env, reg, regno, off, size, false, in check_mem_access()
3574 &env->prog->aux->max_rdonly_access); in check_mem_access()
3577 } else if (reg->type == PTR_TO_RDWR_BUF) { in check_mem_access()
3578 err = check_buffer_access(env, reg, regno, off, size, false, in check_mem_access()
3580 &env->prog->aux->max_rdwr_access); in check_mem_access()
3585 reg_type_str[reg->type]); in check_mem_access()
3586 return -EACCES; in check_mem_access()
3591 /* b/h/w load zero-extends, mark upper bits as known 0 */ in check_mem_access()
3601 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || in check_xadd()
3602 insn->imm != 0) { in check_xadd()
3604 return -EINVAL; in check_xadd()
3608 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_xadd()
3613 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_xadd()
3617 if (is_pointer_value(env, insn->src_reg)) { in check_xadd()
3618 verbose(env, "R%d leaks addr into mem\n", insn->src_reg); in check_xadd()
3619 return -EACCES; in check_xadd()
3622 if (is_ctx_reg(env, insn->dst_reg) || in check_xadd()
3623 is_pkt_reg(env, insn->dst_reg) || in check_xadd()
3624 is_flow_key_reg(env, insn->dst_reg) || in check_xadd()
3625 is_sk_reg(env, insn->dst_reg)) { in check_xadd()
3627 insn->dst_reg, in check_xadd()
3628 reg_type_str[reg_state(env, insn->dst_reg)->type]); in check_xadd()
3629 return -EACCES; in check_xadd()
3633 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_xadd()
3634 BPF_SIZE(insn->code), BPF_READ, -1, true); in check_xadd()
3639 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_xadd()
3640 BPF_SIZE(insn->code), BPF_WRITE, -1, true); in check_xadd()
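
A minimal instruction sequence that exercises check_xadd() above, written with the insn macros used by the kernel's BPF selftests; the sequence itself is an illustrative sketch, not taken from the tree. The stack slot must be initialized before the atomic add, and the destination has to be an ordinary memory pointer rather than a ctx/pkt/flow_keys/sock pointer:

	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),	/* initialize fp-8 first */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),		/* R2 = fp-8, PTR_TO_STACK */
	BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_0, 0),	/* *(u64 *)(R2 + 0) += R0 */
	BPF_EXIT_INSN(),
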
3647 struct bpf_reg_state *reg = reg_state(env, regno); in __check_stack_boundary() local
3649 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || in __check_stack_boundary()
3651 if (tnum_is_const(reg->var_off)) { in __check_stack_boundary()
3657 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in __check_stack_boundary()
3661 return -EACCES; in __check_stack_boundary()
3669 * Unlike most pointer bounds-checking functions, this one doesn't take an
3670 * 'off' argument, so it has to add in reg->off itself.
3676 struct bpf_reg_state *reg = reg_state(env, regno); in check_stack_boundary() local
3677 struct bpf_func_state *state = func(env, reg); in check_stack_boundary()
3680 if (tnum_is_const(reg->var_off)) { in check_stack_boundary()
3681 min_off = max_off = reg->var_off.value + reg->off; in check_stack_boundary()
3692 if (!env->bypass_spec_v1) { in check_stack_boundary()
3695 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_boundary()
3698 return -EACCES; in check_stack_boundary()
3706 if (meta && meta->raw_mode) in check_stack_boundary()
3709 if (reg->smax_value >= BPF_MAX_VAR_OFF || in check_stack_boundary()
3710 reg->smax_value <= -BPF_MAX_VAR_OFF) { in check_stack_boundary()
3713 return -EACCES; in check_stack_boundary()
3715 min_off = reg->smin_value + reg->off; in check_stack_boundary()
3716 max_off = reg->smax_value + reg->off; in check_stack_boundary()
3733 if (meta && meta->raw_mode) { in check_stack_boundary()
3734 meta->access_size = access_size; in check_stack_boundary()
3735 meta->regno = regno; in check_stack_boundary()
3742 slot = -i - 1; in check_stack_boundary()
3744 if (state->allocated_stack <= slot) in check_stack_boundary()
3746 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_boundary()
3755 if (state->stack[spi].slot_type[0] == STACK_SPILL && in check_stack_boundary()
3756 state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID) in check_stack_boundary()
3759 if (state->stack[spi].slot_type[0] == STACK_SPILL && in check_stack_boundary()
3760 state->stack[spi].spilled_ptr.type == SCALAR_VALUE) { in check_stack_boundary()
3761 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); in check_stack_boundary()
3763 state->stack[spi].slot_type[j] = STACK_MISC; in check_stack_boundary()
3768 if (tnum_is_const(reg->var_off)) { in check_stack_boundary()
3770 min_off, i - min_off, access_size); in check_stack_boundary()
3774 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_boundary()
3776 tn_buf, i - min_off, access_size); in check_stack_boundary()
3778 return -EACCES; in check_stack_boundary()
3780 /* reading any byte out of 8-byte 'spill_slot' will cause in check_stack_boundary()
3783 mark_reg_read(env, &state->stack[spi].spilled_ptr, in check_stack_boundary()
3784 state->stack[spi].spilled_ptr.parent, in check_stack_boundary()
3794 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in check_helper_mem_access() local
3796 switch (reg->type) { in check_helper_mem_access()
3799 return check_packet_access(env, regno, reg->off, access_size, in check_helper_mem_access()
3802 if (check_map_access_type(env, regno, reg->off, access_size, in check_helper_mem_access()
3803 meta && meta->raw_mode ? BPF_WRITE : in check_helper_mem_access()
3805 return -EACCES; in check_helper_mem_access()
3806 return check_map_access(env, regno, reg->off, access_size, in check_helper_mem_access()
3809 return check_mem_region_access(env, regno, reg->off, in check_helper_mem_access()
3810 access_size, reg->mem_size, in check_helper_mem_access()
3813 if (meta && meta->raw_mode) in check_helper_mem_access()
3814 return -EACCES; in check_helper_mem_access()
3815 return check_buffer_access(env, reg, regno, reg->off, in check_helper_mem_access()
3818 &env->prog->aux->max_rdonly_access); in check_helper_mem_access()
3820 return check_buffer_access(env, reg, regno, reg->off, in check_helper_mem_access()
3823 &env->prog->aux->max_rdwr_access); in check_helper_mem_access()
3828 /* Allow zero-byte read from NULL, regardless of pointer type */ in check_helper_mem_access()
3830 register_is_null(reg)) in check_helper_mem_access()
3834 reg_type_str[reg->type], in check_helper_mem_access()
3836 return -EACCES; in check_helper_mem_access()
3842 * Two bpf_map_lookups (even with the same key) will have different reg->id.
3843 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
3844 * value_or_null->value transition, since the verifier only cares about
3848 * reg->id > 0 after value_or_null->value transition. By doing so
3852 * dead-locks.
3856 * cur_state->active_spin_lock remembers which map value element got locked
3862 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in process_spin_lock() local
3863 struct bpf_verifier_state *cur = env->cur_state; in process_spin_lock()
3864 bool is_const = tnum_is_const(reg->var_off); in process_spin_lock()
3865 struct bpf_map *map = reg->map_ptr; in process_spin_lock()
3866 u64 val = reg->var_off.value; in process_spin_lock()
3872 return -EINVAL; in process_spin_lock()
3874 if (!map->btf) { in process_spin_lock()
3877 map->name); in process_spin_lock()
3878 return -EINVAL; in process_spin_lock()
3881 if (map->spin_lock_off == -E2BIG) in process_spin_lock()
3884 map->name); in process_spin_lock()
3885 else if (map->spin_lock_off == -ENOENT) in process_spin_lock()
3888 map->name); in process_spin_lock()
3892 map->name); in process_spin_lock()
3893 return -EINVAL; in process_spin_lock()
3895 if (map->spin_lock_off != val + reg->off) { in process_spin_lock()
3897 val + reg->off); in process_spin_lock()
3898 return -EINVAL; in process_spin_lock()
3901 if (cur->active_spin_lock) { in process_spin_lock()
3904 return -EINVAL; in process_spin_lock()
3906 cur->active_spin_lock = reg->id; in process_spin_lock()
3908 if (!cur->active_spin_lock) { in process_spin_lock()
3910 return -EINVAL; in process_spin_lock()
3912 if (cur->active_spin_lock != reg->id) { in process_spin_lock()
3914 return -EINVAL; in process_spin_lock()
3916 cur->active_spin_lock = 0; in process_spin_lock()
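
To ground the reg->id / active_spin_lock discussion above, here is a minimal BPF-C sketch of the pattern process_spin_lock() validates. The map, struct, and section names are illustrative only; the map must carry BTF so the verifier can locate the bpf_spin_lock field inside the value:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct elem {
	int counter;
	struct bpf_spin_lock lock;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} counters SEC(".maps");

SEC("tc")
int bump(struct __sk_buff *skb)
{
	int key = 0;
	struct elem *val = bpf_map_lookup_elem(&counters, &key);

	if (!val)
		return 0;
	bpf_spin_lock(&val->lock);	/* cur->active_spin_lock = id of this lookup */
	val->counter++;
	bpf_spin_unlock(&val->lock);	/* must unlock the same lookup before exit */
	return 0;
}
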
3952 return -EINVAL; in int_ptr_type_to_size()
3959 if (!meta->map_ptr) { in resolve_map_arg_type()
3961 verbose(env, "invalid map_ptr to access map->type\n"); in resolve_map_arg_type()
3962 return -EACCES; in resolve_map_arg_type()
3965 switch (meta->map_ptr->map_type) { in resolve_map_arg_type()
3972 return -EINVAL; in resolve_map_arg_type()
4081 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in check_reg_type() local
4082 enum bpf_reg_type expected, type = reg->type; in check_reg_type()
4089 return -EFAULT; in check_reg_type()
4092 for (i = 0; i < ARRAY_SIZE(compatible->types); i++) { in check_reg_type()
4093 expected = compatible->types[i]; in check_reg_type()
4103 verbose(env, "%s, ", reg_type_str[compatible->types[j]]); in check_reg_type()
4104 verbose(env, "%s\n", reg_type_str[compatible->types[j]]); in check_reg_type()
4105 return -EACCES; in check_reg_type()
4110 if (!compatible->btf_id) { in check_reg_type()
4112 return -EFAULT; in check_reg_type()
4114 arg_btf_id = compatible->btf_id; in check_reg_type()
4117 if (!btf_struct_ids_match(&env->log, reg->off, reg->btf_id, in check_reg_type()
4120 regno, kernel_type_name(reg->btf_id), in check_reg_type()
4122 return -EACCES; in check_reg_type()
4125 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { in check_reg_type()
4126 verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n", in check_reg_type()
4128 return -EACCES; in check_reg_type()
4140 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in check_func_arg() local
4141 enum bpf_arg_type arg_type = fn->arg_type[arg]; in check_func_arg()
4142 enum bpf_reg_type type = reg->type; in check_func_arg()
4156 return -EACCES; in check_func_arg()
4164 return -EACCES; in check_func_arg()
4175 if (register_is_null(reg) && arg_type_may_be_null(arg_type)) in check_func_arg()
4181 err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg]); in check_func_arg()
4186 err = check_ctx_reg(env, reg, regno); in check_func_arg()
4192 if (reg->ref_obj_id) { in check_func_arg()
4193 if (meta->ref_obj_id) { in check_func_arg()
4195 regno, reg->ref_obj_id, in check_func_arg()
4196 meta->ref_obj_id); in check_func_arg()
4197 return -EFAULT; in check_func_arg()
4199 meta->ref_obj_id = reg->ref_obj_id; in check_func_arg()
4204 meta->map_ptr = reg->map_ptr; in check_func_arg()
4207 * check that [key, key + map->key_size) are within in check_func_arg()
4210 if (!meta->map_ptr) { in check_func_arg()
4216 verbose(env, "invalid map_ptr to access map->key\n"); in check_func_arg()
4217 return -EACCES; in check_func_arg()
4220 meta->map_ptr->key_size, false, in check_func_arg()
4224 !register_is_null(reg)) || in check_func_arg()
4227 * check [value, value + map->value_size) validity in check_func_arg()
4229 if (!meta->map_ptr) { in check_func_arg()
4231 verbose(env, "invalid map_ptr to access map->value\n"); in check_func_arg()
4232 return -EACCES; in check_func_arg()
4234 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE); in check_func_arg()
4236 meta->map_ptr->value_size, false, in check_func_arg()
4239 if (!reg->btf_id) { in check_func_arg()
4241 return -EACCES; in check_func_arg()
4243 meta->ret_btf_id = reg->btf_id; in check_func_arg()
4245 if (meta->func_id == BPF_FUNC_spin_lock) { in check_func_arg()
4247 return -EACCES; in check_func_arg()
4248 } else if (meta->func_id == BPF_FUNC_spin_unlock) { in check_func_arg()
4250 return -EACCES; in check_func_arg()
4253 return -EFAULT; in check_func_arg()
4259 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MEM); in check_func_arg()
4271 meta->msize_max_value = reg->umax_value; in check_func_arg()
4276 if (!tnum_is_const(reg->var_off)) in check_func_arg()
4284 if (reg->smin_value < 0) { in check_func_arg()
4287 return -EACCES; in check_func_arg()
4290 if (reg->umin_value == 0) { in check_func_arg()
4291 err = check_helper_mem_access(env, regno - 1, 0, in check_func_arg()
4298 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { in check_func_arg()
4301 return -EACCES; in check_func_arg()
4303 err = check_helper_mem_access(env, regno - 1, in check_func_arg()
4304 reg->umax_value, in check_func_arg()
4309 if (!tnum_is_const(reg->var_off)) { in check_func_arg()
4312 return -EACCES; in check_func_arg()
4314 meta->mem_size = reg->var_off.value; in check_func_arg()
4321 err = check_ptr_alignment(env, reg, 0, size, true); in check_func_arg()
4329 enum bpf_attach_type eatype = env->prog->expected_attach_type; in may_update_sockmap()
4330 enum bpf_prog_type type = resolve_prog_type(env->prog); in may_update_sockmap()
4361 return env->prog->jit_requested && IS_ENABLED(CONFIG_X86_64); in allow_tail_call_in_subprogs()
4371 switch (map->map_type) { in check_map_func_compatibility()
4412 /* Restrict bpf side of cpumap and xskmap, open when use-cases in check_map_func_compatibility()
4477 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) in check_map_func_compatibility()
4479 if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) { in check_map_func_compatibility()
4480 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); in check_map_func_compatibility()
4481 return -EINVAL; in check_map_func_compatibility()
4489 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) in check_map_func_compatibility()
4493 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) in check_map_func_compatibility()
4498 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) in check_map_func_compatibility()
4502 if (map->map_type != BPF_MAP_TYPE_DEVMAP && in check_map_func_compatibility()
4503 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && in check_map_func_compatibility()
4504 map->map_type != BPF_MAP_TYPE_CPUMAP && in check_map_func_compatibility()
4505 map->map_type != BPF_MAP_TYPE_XSKMAP) in check_map_func_compatibility()
4511 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) in check_map_func_compatibility()
4517 if (map->map_type != BPF_MAP_TYPE_SOCKHASH) in check_map_func_compatibility()
4521 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && in check_map_func_compatibility()
4522 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) in check_map_func_compatibility()
4526 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && in check_map_func_compatibility()
4527 map->map_type != BPF_MAP_TYPE_SOCKMAP && in check_map_func_compatibility()
4528 map->map_type != BPF_MAP_TYPE_SOCKHASH) in check_map_func_compatibility()
4534 if (map->map_type != BPF_MAP_TYPE_QUEUE && in check_map_func_compatibility()
4535 map->map_type != BPF_MAP_TYPE_STACK) in check_map_func_compatibility()
4540 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) in check_map_func_compatibility()
4545 if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE) in check_map_func_compatibility()
4555 map->map_type, func_id_name(func_id), func_id); in check_map_func_compatibility()
4556 return -EINVAL; in check_map_func_compatibility()
4563 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) in check_raw_mode_ok()
4565 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) in check_raw_mode_ok()
4567 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) in check_raw_mode_ok()
4569 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) in check_raw_mode_ok()
4571 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) in check_raw_mode_ok()
4597 if (arg_type_is_mem_size(fn->arg1_type) || in check_arg_pair_ok()
4598 arg_type_is_mem_ptr(fn->arg5_type) || in check_arg_pair_ok()
4599 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) || in check_arg_pair_ok()
4600 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) || in check_arg_pair_ok()
4601 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) || in check_arg_pair_ok()
4602 check_args_pair_invalid(fn->arg4_type, fn->arg5_type)) in check_arg_pair_ok()
4612 if (arg_type_may_be_refcounted(fn->arg1_type)) in check_refcount_ok()
4614 if (arg_type_may_be_refcounted(fn->arg2_type)) in check_refcount_ok()
4616 if (arg_type_may_be_refcounted(fn->arg3_type)) in check_refcount_ok()
4618 if (arg_type_may_be_refcounted(fn->arg4_type)) in check_refcount_ok()
4620 if (arg_type_may_be_refcounted(fn->arg5_type)) in check_refcount_ok()
4639 for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) { in check_btf_id_ok()
4640 if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i]) in check_btf_id_ok()
4643 if (fn->arg_type[i] != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i]) in check_btf_id_ok()
4655 check_refcount_ok(fn, func_id) ? 0 : -EINVAL; in check_func_proto()
4664 struct bpf_reg_state *regs = state->regs, *reg; in __clear_all_pkt_pointers() local
4671 bpf_for_each_spilled_reg(i, state, reg) { in __clear_all_pkt_pointers()
4672 if (!reg) in __clear_all_pkt_pointers()
4674 if (reg_is_pkt_pointer_any(reg)) in __clear_all_pkt_pointers()
4675 __mark_reg_unknown(env, reg); in __clear_all_pkt_pointers()
4681 struct bpf_verifier_state *vstate = env->cur_state; in clear_all_pkt_pointers()
4684 for (i = 0; i <= vstate->curframe; i++) in clear_all_pkt_pointers()
4685 __clear_all_pkt_pointers(env, vstate->frame[i]); in clear_all_pkt_pointers()
4692 struct bpf_reg_state *regs = state->regs, *reg; in release_reg_references() local
4699 bpf_for_each_spilled_reg(i, state, reg) { in release_reg_references()
4700 if (!reg) in release_reg_references()
4702 if (reg->ref_obj_id == ref_obj_id) in release_reg_references()
4703 __mark_reg_unknown(env, reg); in release_reg_references()
4713 struct bpf_verifier_state *vstate = env->cur_state; in release_reference()
4721 for (i = 0; i <= vstate->curframe; i++) in release_reference()
4722 release_reg_references(env, vstate->frame[i], ref_obj_id); in release_reference()
4732 /* after the call registers r0 - r5 were scratched */ in clear_caller_saved_regs()
4742 struct bpf_verifier_state *state = env->cur_state; in check_func_call()
4748 if (state->curframe + 1 >= MAX_CALL_FRAMES) { in check_func_call()
4750 state->curframe + 2); in check_func_call()
4751 return -E2BIG; in check_func_call()
4754 target_insn = *insn_idx + insn->imm; in check_func_call()
4759 return -EFAULT; in check_func_call()
4762 caller = state->frame[state->curframe]; in check_func_call()
4763 if (state->frame[state->curframe + 1]) { in check_func_call()
4765 state->curframe + 1); in check_func_call()
4766 return -EFAULT; in check_func_call()
4769 func_info_aux = env->prog->aux->func_info_aux; in check_func_call()
4772 err = btf_check_func_arg_match(env, subprog, caller->regs); in check_func_call()
4773 if (err == -EFAULT) in check_func_call()
4781 if (env->log.level & BPF_LOG_LEVEL) in check_func_call()
4785 clear_caller_saved_regs(env, caller->regs); in check_func_call()
4788 mark_reg_unknown(env, caller->regs, BPF_REG_0); in check_func_call()
4797 return -ENOMEM; in check_func_call()
4798 state->frame[state->curframe + 1] = callee; in check_func_call()
4800 /* callee cannot access r0, r6 - r9 for reading and has to write in check_func_call()
4807 state->curframe + 1 /* frameno within this callchain */, in check_func_call()
4815 /* copy r1 - r5 args that callee can access. The copy includes parent in check_func_call()
4819 callee->regs[i] = caller->regs[i]; in check_func_call()
4821 clear_caller_saved_regs(env, caller->regs); in check_func_call()
4824 state->curframe++; in check_func_call()
4829 if (env->log.level & BPF_LOG_LEVEL) { in check_func_call()
4840 struct bpf_verifier_state *state = env->cur_state; in prepare_func_exit()
4845 callee = state->frame[state->curframe]; in prepare_func_exit()
4846 r0 = &callee->regs[BPF_REG_0]; in prepare_func_exit()
4847 if (r0->type == PTR_TO_STACK) { in prepare_func_exit()
4855 return -EINVAL; in prepare_func_exit()
4858 state->curframe--; in prepare_func_exit()
4859 caller = state->frame[state->curframe]; in prepare_func_exit()
4861 caller->regs[BPF_REG_0] = *r0; in prepare_func_exit()
4868 *insn_idx = callee->callsite + 1; in prepare_func_exit()
4869 if (env->log.level & BPF_LOG_LEVEL) { in prepare_func_exit()
4877 state->frame[state->curframe + 1] = NULL; in prepare_func_exit()
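
check_func_call() and prepare_func_exit() above verify BPF-to-BPF calls one frame at a time. A minimal BPF-C sketch of a program that takes this path (function and section names are illustrative; the noinline attribute keeps the helper as a real call instead of letting the compiler inline it away):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

static __attribute__((noinline)) int add_one(int x)
{
	return x + 1;	/* verified in its own bpf_func_state frame */
}

SEC("tracepoint")
int prog(void *ctx)
{
	/* r1-r5 carry the arguments into the callee, r6-r9 are preserved
	 * across the call, and prepare_func_exit() copies the callee's r0
	 * back into the caller's r0. */
	return add_one(41);
}
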
4894 ret_reg->smax_value = meta->msize_max_value; in do_refine_retval_range()
4895 ret_reg->s32_max_value = meta->msize_max_value; in do_refine_retval_range()
4896 ret_reg->smin_value = -MAX_ERRNO; in do_refine_retval_range()
4897 ret_reg->s32_min_value = -MAX_ERRNO; in do_refine_retval_range()
4907 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; in record_func_map()
4908 struct bpf_map *map = meta->map_ptr; in record_func_map()
4921 return -EINVAL; in record_func_map()
4924 /* In case of read-only, some additional restrictions in record_func_map()
4928 if ((map->map_flags & BPF_F_RDONLY_PROG) && in record_func_map()
4934 return -EACCES; in record_func_map()
4937 if (!BPF_MAP_PTR(aux->map_ptr_state)) in record_func_map()
4938 bpf_map_ptr_store(aux, meta->map_ptr, in record_func_map()
4939 !meta->map_ptr->bypass_spec_v1); in record_func_map()
4940 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr) in record_func_map()
4942 !meta->map_ptr->bypass_spec_v1); in record_func_map()
4950 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; in record_func_key()
4951 struct bpf_reg_state *regs = cur_regs(env), *reg; in record_func_key() local
4952 struct bpf_map *map = meta->map_ptr; in record_func_key()
4959 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) { in record_func_key()
4961 return -EINVAL; in record_func_key()
4964 range = tnum_range(0, map->max_entries - 1); in record_func_key()
4965 	reg = &regs[BPF_REG_3]; in record_func_key()
4967 if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) { in record_func_key()
4976 val = reg->var_off.value; in record_func_key()
4990 for (i = 0; i < state->acquired_refs; i++) { in check_reference_leak()
4992 state->refs[i].id, state->refs[i].insn_idx); in check_reference_leak()
4994 return state->acquired_refs ? -EINVAL : 0; in check_reference_leak()
5009 return -EINVAL; in check_helper_call()
5012 if (env->ops->get_func_proto) in check_helper_call()
5013 fn = env->ops->get_func_proto(func_id, env->prog); in check_helper_call()
5017 return -EINVAL; in check_helper_call()
5020 /* eBPF programs must be GPL compatible to use GPL-ed functions */ in check_helper_call()
5021 if (!env->prog->gpl_compatible && fn->gpl_only) { in check_helper_call()
5022 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); in check_helper_call()
5023 return -EINVAL; in check_helper_call()
5026 if (fn->allowed && !fn->allowed(env->prog)) { in check_helper_call()
5028 return -EINVAL; in check_helper_call()
5032 changes_data = bpf_helper_changes_pkt_data(fn->func); in check_helper_call()
5033 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { in check_helper_call()
5036 return -EINVAL; in check_helper_call()
5040 meta.pkt_access = fn->pkt_access; in check_helper_call()
5070 BPF_WRITE, -1, false); in check_helper_call()
5097 verbose(env, "get_local_storage() doesn't support non-zero flags\n"); in check_helper_call()
5098 return -EINVAL; in check_helper_call()
5107 /* helper call returns 64-bit value. */ in check_helper_call()
5111 if (fn->ret_type == RET_INTEGER) { in check_helper_call()
5114 } else if (fn->ret_type == RET_VOID) { in check_helper_call()
5116 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL || in check_helper_call()
5117 fn->ret_type == RET_PTR_TO_MAP_VALUE) { in check_helper_call()
5127 return -EINVAL; in check_helper_call()
5130 if (fn->ret_type == RET_PTR_TO_MAP_VALUE) { in check_helper_call()
5133 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
5137 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { in check_helper_call()
5140 } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) { in check_helper_call()
5143 } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) { in check_helper_call()
5146 } else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) { in check_helper_call()
5150 } else if (fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL || in check_helper_call()
5151 fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID) { in check_helper_call()
5164 tname = btf_name_by_offset(btf_vmlinux, t->name_off); in check_helper_call()
5167 return -EINVAL; in check_helper_call()
5170 fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ? in check_helper_call()
5175 fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ? in check_helper_call()
5179 } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) { in check_helper_call()
5184 ret_btf_id = *fn->ret_btf_id; in check_helper_call()
5187 fn->ret_type, func_id_name(func_id), func_id); in check_helper_call()
5188 return -EINVAL; in check_helper_call()
5193 fn->ret_type, func_id_name(func_id), func_id); in check_helper_call()
5194 return -EINVAL; in check_helper_call()
5198 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
5214 do_refine_retval_range(regs, fn->ret_type, func_id, &meta); in check_helper_call()
5222 !env->prog->has_callchain_buf) { in check_helper_call()
5229 err = -ENOTSUPP; in check_helper_call()
5237 env->prog->has_callchain_buf = true; in check_helper_call()
5241 env->prog->call_get_stack = true; in check_helper_call()
5250 /* Do the add in u64, where overflow is well-defined */ in signed_add_overflows()
5260 /* Do the add in u32, where overflow is well-defined */ in signed_add32_overflows()
5270 /* Do the sub in u64, where overflow is well-defined */ in signed_sub_overflows()
5271 s64 res = (s64)((u64)a - (u64)b); in signed_sub_overflows()
5280 /* Do the sub in u64, where overflow is well-defined */ in signed_sub32_overflows()
5281 s32 res = (s32)((u32)a - (u32)b); in signed_sub32_overflows()
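
The four helpers above share one pattern: do the arithmetic in the unsigned type, where wrap-around is well-defined, then compare the result against an operand to detect signed overflow. A standalone sketch of the 64-bit add case, matching the comments above:

#include <stdbool.h>
#include <stdint.h>

/* returns true if a + b overflows the signed 64-bit range */
static bool sketch_signed_add_overflows(int64_t a, int64_t b)
{
	/* wrap-around add in u64 avoids relying on signed overflow */
	int64_t res = (int64_t)((uint64_t)a + (uint64_t)b);

	if (b < 0)
		return res > a;	/* adding a negative value must not grow a */
	return res < a;		/* adding a non-negative value must not shrink a */
}
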
5289 const struct bpf_reg_state *reg, in check_reg_sane_offset() argument
5292 bool known = tnum_is_const(reg->var_off); in check_reg_sane_offset()
5293 s64 val = reg->var_off.value; in check_reg_sane_offset()
5294 s64 smin = reg->smin_value; in check_reg_sane_offset()
5296 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { in check_reg_sane_offset()
5302 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { in check_reg_sane_offset()
5304 reg_type_str[type], reg->off); in check_reg_sane_offset()
5314 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { in check_reg_sane_offset()
5325 return &env->insn_aux_data[env->insn_idx]; in cur_aux()
5335 switch (ptr_reg->type) { in retrieve_ptr_limit()
5340 off = ptr_reg->off + ptr_reg->var_off.value; in retrieve_ptr_limit()
5344 *ptr_limit = -off; in retrieve_ptr_limit()
5348 *ptr_limit = ptr_reg->umax_value + ptr_reg->off; in retrieve_ptr_limit()
5350 off = ptr_reg->smin_value + ptr_reg->off; in retrieve_ptr_limit()
5351 *ptr_limit = ptr_reg->map_ptr->value_size - off; in retrieve_ptr_limit()
5355 return -EINVAL; in retrieve_ptr_limit()
5362 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K; in can_skip_alu_sanitation()
5371 if (aux->alu_state && in update_alu_sanitation_state()
5372 (aux->alu_state != alu_state || in update_alu_sanitation_state()
5373 aux->alu_limit != alu_limit)) in update_alu_sanitation_state()
5374 return -EACCES; in update_alu_sanitation_state()
5377 aux->alu_state = alu_state; in update_alu_sanitation_state()
5378 aux->alu_limit = alu_limit; in update_alu_sanitation_state()
5399 struct bpf_verifier_state *vstate = env->cur_state; in sanitize_ptr_alu()
5402 u8 opcode = BPF_OP(insn->code); in sanitize_ptr_alu()
5410 /* We already marked aux for masking from non-speculative in sanitize_ptr_alu()
5414 if (vstate->speculative) in sanitize_ptr_alu()
5424 return -EACCES; in sanitize_ptr_alu()
5426 /* Simulate and find potential out-of-bounds access under in sanitize_ptr_alu()
5430 * to simulate dst (== 0) +/-= ptr. Needed, for example, in sanitize_ptr_alu()
5431 * for cases where we use K-based arithmetic in one direction in sanitize_ptr_alu()
5432 * and truncated reg-based in the other in order to explore in sanitize_ptr_alu()
5439 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true); in sanitize_ptr_alu()
5442 return !ret ? -EFAULT : 0; in sanitize_ptr_alu()
5447 * If we return -EACCES, caller may want to try again treating pointer as a
5448 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
5455 struct bpf_verifier_state *vstate = env->cur_state; in adjust_ptr_min_max_vals()
5456 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in adjust_ptr_min_max_vals()
5457 struct bpf_reg_state *regs = state->regs, *dst_reg; in adjust_ptr_min_max_vals()
5458 bool known = tnum_is_const(off_reg->var_off); in adjust_ptr_min_max_vals()
5459 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, in adjust_ptr_min_max_vals()
5460 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; in adjust_ptr_min_max_vals()
5461 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, in adjust_ptr_min_max_vals()
5462 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; in adjust_ptr_min_max_vals()
5463 u32 dst = insn->dst_reg, src = insn->src_reg; in adjust_ptr_min_max_vals()
5464 u8 opcode = BPF_OP(insn->code); in adjust_ptr_min_max_vals()
5478 if (BPF_CLASS(insn->code) != BPF_ALU64) { in adjust_ptr_min_max_vals()
5479 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ in adjust_ptr_min_max_vals()
5480 if (opcode == BPF_SUB && env->allow_ptr_leaks) { in adjust_ptr_min_max_vals()
5486 "R%d 32-bit pointer arithmetic prohibited\n", in adjust_ptr_min_max_vals()
5488 return -EACCES; in adjust_ptr_min_max_vals()
5491 switch (ptr_reg->type) { in adjust_ptr_min_max_vals()
5493 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", in adjust_ptr_min_max_vals()
5494 dst, reg_type_str[ptr_reg->type]); in adjust_ptr_min_max_vals()
5495 return -EACCES; in adjust_ptr_min_max_vals()
5510 dst, reg_type_str[ptr_reg->type]); in adjust_ptr_min_max_vals()
5511 return -EACCES; in adjust_ptr_min_max_vals()
5513 if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) { in adjust_ptr_min_max_vals()
5516 return -EACCES; in adjust_ptr_min_max_vals()
5526 dst_reg->type = ptr_reg->type; in adjust_ptr_min_max_vals()
5527 dst_reg->id = ptr_reg->id; in adjust_ptr_min_max_vals()
5529 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || in adjust_ptr_min_max_vals()
5530 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) in adjust_ptr_min_max_vals()
5531 return -EINVAL; in adjust_ptr_min_max_vals()
5533 /* pointer types do not carry 32-bit bounds at the moment. */ in adjust_ptr_min_max_vals()
5546 if (known && (ptr_reg->off + smin_val == in adjust_ptr_min_max_vals()
5547 (s64)(s32)(ptr_reg->off + smin_val))) { in adjust_ptr_min_max_vals()
5549 dst_reg->smin_value = smin_ptr; in adjust_ptr_min_max_vals()
5550 dst_reg->smax_value = smax_ptr; in adjust_ptr_min_max_vals()
5551 dst_reg->umin_value = umin_ptr; in adjust_ptr_min_max_vals()
5552 dst_reg->umax_value = umax_ptr; in adjust_ptr_min_max_vals()
5553 dst_reg->var_off = ptr_reg->var_off; in adjust_ptr_min_max_vals()
5554 dst_reg->off = ptr_reg->off + smin_val; in adjust_ptr_min_max_vals()
5555 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
5558 /* A new variable offset is created. Note that off_reg->off in adjust_ptr_min_max_vals()
5569 dst_reg->smin_value = S64_MIN; in adjust_ptr_min_max_vals()
5570 dst_reg->smax_value = S64_MAX; in adjust_ptr_min_max_vals()
5572 dst_reg->smin_value = smin_ptr + smin_val; in adjust_ptr_min_max_vals()
5573 dst_reg->smax_value = smax_ptr + smax_val; in adjust_ptr_min_max_vals()
5577 dst_reg->umin_value = 0; in adjust_ptr_min_max_vals()
5578 dst_reg->umax_value = U64_MAX; in adjust_ptr_min_max_vals()
5580 dst_reg->umin_value = umin_ptr + umin_val; in adjust_ptr_min_max_vals()
5581 dst_reg->umax_value = umax_ptr + umax_val; in adjust_ptr_min_max_vals()
5583 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); in adjust_ptr_min_max_vals()
5584 dst_reg->off = ptr_reg->off; in adjust_ptr_min_max_vals()
5585 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
5587 dst_reg->id = ++env->id_gen; in adjust_ptr_min_max_vals()
5589 dst_reg->raw = 0; in adjust_ptr_min_max_vals()
5599 /* scalar -= pointer. Creates an unknown scalar */ in adjust_ptr_min_max_vals()
5602 return -EACCES; in adjust_ptr_min_max_vals()
5608 if (ptr_reg->type == PTR_TO_STACK) { in adjust_ptr_min_max_vals()
5611 return -EACCES; in adjust_ptr_min_max_vals()
5613 if (known && (ptr_reg->off - smin_val == in adjust_ptr_min_max_vals()
5614 (s64)(s32)(ptr_reg->off - smin_val))) { in adjust_ptr_min_max_vals()
5615 /* pointer -= K. Subtract it from fixed offset */ in adjust_ptr_min_max_vals()
5616 dst_reg->smin_value = smin_ptr; in adjust_ptr_min_max_vals()
5617 dst_reg->smax_value = smax_ptr; in adjust_ptr_min_max_vals()
5618 dst_reg->umin_value = umin_ptr; in adjust_ptr_min_max_vals()
5619 dst_reg->umax_value = umax_ptr; in adjust_ptr_min_max_vals()
5620 dst_reg->var_off = ptr_reg->var_off; in adjust_ptr_min_max_vals()
5621 dst_reg->id = ptr_reg->id; in adjust_ptr_min_max_vals()
5622 dst_reg->off = ptr_reg->off - smin_val; in adjust_ptr_min_max_vals()
5623 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
5627 * nonnegative, then any reg->range we had before is still good. in adjust_ptr_min_max_vals()
5632 dst_reg->smin_value = S64_MIN; in adjust_ptr_min_max_vals()
5633 dst_reg->smax_value = S64_MAX; in adjust_ptr_min_max_vals()
5635 dst_reg->smin_value = smin_ptr - smax_val; in adjust_ptr_min_max_vals()
5636 dst_reg->smax_value = smax_ptr - smin_val; in adjust_ptr_min_max_vals()
5640 dst_reg->umin_value = 0; in adjust_ptr_min_max_vals()
5641 dst_reg->umax_value = U64_MAX; in adjust_ptr_min_max_vals()
5644 dst_reg->umin_value = umin_ptr - umax_val; in adjust_ptr_min_max_vals()
5645 dst_reg->umax_value = umax_ptr - umin_val; in adjust_ptr_min_max_vals()
5647 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); in adjust_ptr_min_max_vals()
5648 dst_reg->off = ptr_reg->off; in adjust_ptr_min_max_vals()
5649 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
5651 dst_reg->id = ++env->id_gen; in adjust_ptr_min_max_vals()
5654 dst_reg->raw = 0; in adjust_ptr_min_max_vals()
5663 return -EACCES; in adjust_ptr_min_max_vals()
5665 /* other operators (e.g. MUL,LSH) produce non-pointer results */ in adjust_ptr_min_max_vals()
5668 return -EACCES; in adjust_ptr_min_max_vals()
5671 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) in adjust_ptr_min_max_vals()
5672 return -EINVAL; in adjust_ptr_min_max_vals()
5681 if (!env->bypass_spec_v1) { in adjust_ptr_min_max_vals()
5682 if (dst_reg->type == PTR_TO_MAP_VALUE && in adjust_ptr_min_max_vals()
5683 check_map_access(env, dst, dst_reg->off, 1, false)) { in adjust_ptr_min_max_vals()
5686 return -EACCES; in adjust_ptr_min_max_vals()
5687 } else if (dst_reg->type == PTR_TO_STACK && in adjust_ptr_min_max_vals()
5688 check_stack_access(env, dst_reg, dst_reg->off + in adjust_ptr_min_max_vals()
5689 dst_reg->var_off.value, 1)) { in adjust_ptr_min_max_vals()
5692 return -EACCES; in adjust_ptr_min_max_vals()
5702 s32 smin_val = src_reg->s32_min_value; in scalar32_min_max_add()
5703 s32 smax_val = src_reg->s32_max_value; in scalar32_min_max_add()
5704 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_add()
5705 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_add()
5707 if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) || in scalar32_min_max_add()
5708 signed_add32_overflows(dst_reg->s32_max_value, smax_val)) { in scalar32_min_max_add()
5709 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_add()
5710 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_add()
5712 dst_reg->s32_min_value += smin_val; in scalar32_min_max_add()
5713 dst_reg->s32_max_value += smax_val; in scalar32_min_max_add()
5715 if (dst_reg->u32_min_value + umin_val < umin_val || in scalar32_min_max_add()
5716 dst_reg->u32_max_value + umax_val < umax_val) { in scalar32_min_max_add()
5717 dst_reg->u32_min_value = 0; in scalar32_min_max_add()
5718 dst_reg->u32_max_value = U32_MAX; in scalar32_min_max_add()
5720 dst_reg->u32_min_value += umin_val; in scalar32_min_max_add()
5721 dst_reg->u32_max_value += umax_val; in scalar32_min_max_add()
5728 s64 smin_val = src_reg->smin_value; in scalar_min_max_add()
5729 s64 smax_val = src_reg->smax_value; in scalar_min_max_add()
5730 u64 umin_val = src_reg->umin_value; in scalar_min_max_add()
5731 u64 umax_val = src_reg->umax_value; in scalar_min_max_add()
5733 if (signed_add_overflows(dst_reg->smin_value, smin_val) || in scalar_min_max_add()
5734 signed_add_overflows(dst_reg->smax_value, smax_val)) { in scalar_min_max_add()
5735 dst_reg->smin_value = S64_MIN; in scalar_min_max_add()
5736 dst_reg->smax_value = S64_MAX; in scalar_min_max_add()
5738 dst_reg->smin_value += smin_val; in scalar_min_max_add()
5739 dst_reg->smax_value += smax_val; in scalar_min_max_add()
5741 if (dst_reg->umin_value + umin_val < umin_val || in scalar_min_max_add()
5742 dst_reg->umax_value + umax_val < umax_val) { in scalar_min_max_add()
5743 dst_reg->umin_value = 0; in scalar_min_max_add()
5744 dst_reg->umax_value = U64_MAX; in scalar_min_max_add()
5746 dst_reg->umin_value += umin_val; in scalar_min_max_add()
5747 dst_reg->umax_value += umax_val; in scalar_min_max_add()
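
As a concrete reading of scalar_min_max_add() above: each bound is added independently, and a bound pair collapses to the full range whenever the addition could have wrapped. With dst known to lie in [10, 20] and src in [5, 7], neither signed_add_overflows() check fires and the signed bounds become [15, 27]. If instead dst->umax_value were U64_MAX - 3 and src's umax were 10, the unsigned check (dst_reg->umax_value + umax_val < umax_val) detects the wrap and the unsigned bounds collapse to [0, U64_MAX].
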
5754 s32 smin_val = src_reg->s32_min_value; in scalar32_min_max_sub()
5755 s32 smax_val = src_reg->s32_max_value; in scalar32_min_max_sub()
5756 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_sub()
5757 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_sub()
5759 if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) || in scalar32_min_max_sub()
5760 signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) { in scalar32_min_max_sub()
5762 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_sub()
5763 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_sub()
5765 dst_reg->s32_min_value -= smax_val; in scalar32_min_max_sub()
5766 dst_reg->s32_max_value -= smin_val; in scalar32_min_max_sub()
5768 if (dst_reg->u32_min_value < umax_val) { in scalar32_min_max_sub()
5770 dst_reg->u32_min_value = 0; in scalar32_min_max_sub()
5771 dst_reg->u32_max_value = U32_MAX; in scalar32_min_max_sub()
5774 dst_reg->u32_min_value -= umax_val; in scalar32_min_max_sub()
5775 dst_reg->u32_max_value -= umin_val; in scalar32_min_max_sub()
5782 s64 smin_val = src_reg->smin_value; in scalar_min_max_sub()
5783 s64 smax_val = src_reg->smax_value; in scalar_min_max_sub()
5784 u64 umin_val = src_reg->umin_value; in scalar_min_max_sub()
5785 u64 umax_val = src_reg->umax_value; in scalar_min_max_sub()
5787 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || in scalar_min_max_sub()
5788 signed_sub_overflows(dst_reg->smax_value, smin_val)) { in scalar_min_max_sub()
5790 dst_reg->smin_value = S64_MIN; in scalar_min_max_sub()
5791 dst_reg->smax_value = S64_MAX; in scalar_min_max_sub()
5793 dst_reg->smin_value -= smax_val; in scalar_min_max_sub()
5794 dst_reg->smax_value -= smin_val; in scalar_min_max_sub()
5796 if (dst_reg->umin_value < umax_val) { in scalar_min_max_sub()
5798 dst_reg->umin_value = 0; in scalar_min_max_sub()
5799 dst_reg->umax_value = U64_MAX; in scalar_min_max_sub()
5802 dst_reg->umin_value -= umax_val; in scalar_min_max_sub()
5803 dst_reg->umax_value -= umin_val; in scalar_min_max_sub()
5810 s32 smin_val = src_reg->s32_min_value; in scalar32_min_max_mul()
5811 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_mul()
5812 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_mul()
5814 if (smin_val < 0 || dst_reg->s32_min_value < 0) { in scalar32_min_max_mul()
5822 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) { in scalar32_min_max_mul()
5827 dst_reg->u32_min_value *= umin_val; in scalar32_min_max_mul()
5828 dst_reg->u32_max_value *= umax_val; in scalar32_min_max_mul()
5829 if (dst_reg->u32_max_value > S32_MAX) { in scalar32_min_max_mul()
5831 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_mul()
5832 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_mul()
5834 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_mul()
5835 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_mul()
5842 s64 smin_val = src_reg->smin_value; in scalar_min_max_mul()
5843 u64 umin_val = src_reg->umin_value; in scalar_min_max_mul()
5844 u64 umax_val = src_reg->umax_value; in scalar_min_max_mul()
5846 if (smin_val < 0 || dst_reg->smin_value < 0) { in scalar_min_max_mul()
5854 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { in scalar_min_max_mul()
5859 dst_reg->umin_value *= umin_val; in scalar_min_max_mul()
5860 dst_reg->umax_value *= umax_val; in scalar_min_max_mul()
5861 if (dst_reg->umax_value > S64_MAX) { in scalar_min_max_mul()
5863 dst_reg->smin_value = S64_MIN; in scalar_min_max_mul()
5864 dst_reg->smax_value = S64_MAX; in scalar_min_max_mul()
5866 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_mul()
5867 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_mul()
5874 bool src_known = tnum_subreg_is_const(src_reg->var_off); in scalar32_min_max_and()
5875 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); in scalar32_min_max_and()
5876 struct tnum var32_off = tnum_subreg(dst_reg->var_off); in scalar32_min_max_and()
5877 s32 smin_val = src_reg->s32_min_value; in scalar32_min_max_and()
5878 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_and()
5881 * to skip updating register for known 32-bit case. in scalar32_min_max_and()
5889 dst_reg->u32_min_value = var32_off.value; in scalar32_min_max_and()
5890 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val); in scalar32_min_max_and()
5891 if (dst_reg->s32_min_value < 0 || smin_val < 0) { in scalar32_min_max_and()
5895 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_and()
5896 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_and()
5901 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_and()
5902 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_and()
5910 bool src_known = tnum_is_const(src_reg->var_off); in scalar_min_max_and()
5911 bool dst_known = tnum_is_const(dst_reg->var_off); in scalar_min_max_and()
5912 s64 smin_val = src_reg->smin_value; in scalar_min_max_and()
5913 u64 umax_val = src_reg->umax_value; in scalar_min_max_and()
5916 __mark_reg_known(dst_reg, dst_reg->var_off.value); in scalar_min_max_and()
5923 dst_reg->umin_value = dst_reg->var_off.value; in scalar_min_max_and()
5924 dst_reg->umax_value = min(dst_reg->umax_value, umax_val); in scalar_min_max_and()
5925 if (dst_reg->smin_value < 0 || smin_val < 0) { in scalar_min_max_and()
5929 dst_reg->smin_value = S64_MIN; in scalar_min_max_and()
5930 dst_reg->smax_value = S64_MAX; in scalar_min_max_and()
5935 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_and()
5936 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_and()
5945 bool src_known = tnum_subreg_is_const(src_reg->var_off); in scalar32_min_max_or()
5946 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); in scalar32_min_max_or()
5947 struct tnum var32_off = tnum_subreg(dst_reg->var_off); in scalar32_min_max_or()
5948 s32 smin_val = src_reg->s32_min_value; in scalar32_min_max_or()
5949 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_or()
5960 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val); in scalar32_min_max_or()
5961 dst_reg->u32_max_value = var32_off.value | var32_off.mask; in scalar32_min_max_or()
5962 if (dst_reg->s32_min_value < 0 || smin_val < 0) { in scalar32_min_max_or()
5966 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_or()
5967 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_or()
5972 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_or()
5973 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_or()
5980 bool src_known = tnum_is_const(src_reg->var_off); in scalar_min_max_or()
5981 bool dst_known = tnum_is_const(dst_reg->var_off); in scalar_min_max_or()
5982 s64 smin_val = src_reg->smin_value; in scalar_min_max_or()
5983 u64 umin_val = src_reg->umin_value; in scalar_min_max_or()
5986 __mark_reg_known(dst_reg, dst_reg->var_off.value); in scalar_min_max_or()
5993 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); in scalar_min_max_or()
5994 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; in scalar_min_max_or()
5995 if (dst_reg->smin_value < 0 || smin_val < 0) { in scalar_min_max_or()
5999 dst_reg->smin_value = S64_MIN; in scalar_min_max_or()
6000 dst_reg->smax_value = S64_MAX; in scalar_min_max_or()
6005 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_or()
6006 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_or()
6015 bool src_known = tnum_subreg_is_const(src_reg->var_off); in scalar32_min_max_xor()
6016 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); in scalar32_min_max_xor()
6017 struct tnum var32_off = tnum_subreg(dst_reg->var_off); in scalar32_min_max_xor()
6018 s32 smin_val = src_reg->s32_min_value; in scalar32_min_max_xor()
6027 dst_reg->u32_min_value = var32_off.value; in scalar32_min_max_xor()
6028 dst_reg->u32_max_value = var32_off.value | var32_off.mask; in scalar32_min_max_xor()
6030 if (dst_reg->s32_min_value >= 0 && smin_val >= 0) { in scalar32_min_max_xor()
6034 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_xor()
6035 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_xor()
6037 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_xor()
6038 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_xor()
6045 bool src_known = tnum_is_const(src_reg->var_off); in scalar_min_max_xor()
6046 bool dst_known = tnum_is_const(dst_reg->var_off); in scalar_min_max_xor()
6047 s64 smin_val = src_reg->smin_value; in scalar_min_max_xor()
6050 /* dst_reg->var_off.value has been updated earlier */ in scalar_min_max_xor()
6051 __mark_reg_known(dst_reg, dst_reg->var_off.value); in scalar_min_max_xor()
6056 dst_reg->umin_value = dst_reg->var_off.value; in scalar_min_max_xor()
6057 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; in scalar_min_max_xor()
6059 if (dst_reg->smin_value >= 0 && smin_val >= 0) { in scalar_min_max_xor()
6063 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_xor()
6064 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_xor()
6066 dst_reg->smin_value = S64_MIN; in scalar_min_max_xor()
6067 dst_reg->smax_value = S64_MAX; in scalar_min_max_xor()
6079 dst_reg->s32_min_value = S32_MIN; in __scalar32_min_max_lsh()
6080 dst_reg->s32_max_value = S32_MAX; in __scalar32_min_max_lsh()
6082 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) { in __scalar32_min_max_lsh()
6083 dst_reg->u32_min_value = 0; in __scalar32_min_max_lsh()
6084 dst_reg->u32_max_value = U32_MAX; in __scalar32_min_max_lsh()
6086 dst_reg->u32_min_value <<= umin_val; in __scalar32_min_max_lsh()
6087 dst_reg->u32_max_value <<= umax_val; in __scalar32_min_max_lsh()
6094 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_lsh()
6095 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_lsh()
6097 struct tnum subreg = tnum_subreg(dst_reg->var_off); in scalar32_min_max_lsh()
6100 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val)); in scalar32_min_max_lsh()
6119 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0) in __scalar64_min_max_lsh()
6120 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32; in __scalar64_min_max_lsh()
6122 dst_reg->smax_value = S64_MAX; in __scalar64_min_max_lsh()
6124 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0) in __scalar64_min_max_lsh()
6125 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32; in __scalar64_min_max_lsh()
6127 dst_reg->smin_value = S64_MIN; in __scalar64_min_max_lsh()
6130 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { in __scalar64_min_max_lsh()
6131 dst_reg->umin_value = 0; in __scalar64_min_max_lsh()
6132 dst_reg->umax_value = U64_MAX; in __scalar64_min_max_lsh()
6134 dst_reg->umin_value <<= umin_val; in __scalar64_min_max_lsh()
6135 dst_reg->umax_value <<= umax_val; in __scalar64_min_max_lsh()
6142 u64 umax_val = src_reg->umax_value; in scalar_min_max_lsh()
6143 u64 umin_val = src_reg->umin_value; in scalar_min_max_lsh()
6149 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); in scalar_min_max_lsh()
6157 struct tnum subreg = tnum_subreg(dst_reg->var_off); in scalar32_min_max_rsh()
6158 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_rsh()
6159 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_rsh()
6175 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_rsh()
6176 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_rsh()
6178 dst_reg->var_off = tnum_rshift(subreg, umin_val); in scalar32_min_max_rsh()
6179 dst_reg->u32_min_value >>= umax_val; in scalar32_min_max_rsh()
6180 dst_reg->u32_max_value >>= umin_val; in scalar32_min_max_rsh()
6189 u64 umax_val = src_reg->umax_value; in scalar_min_max_rsh()
6190 u64 umin_val = src_reg->umin_value; in scalar_min_max_rsh()
6206 dst_reg->smin_value = S64_MIN; in scalar_min_max_rsh()
6207 dst_reg->smax_value = S64_MAX; in scalar_min_max_rsh()
6208 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); in scalar_min_max_rsh()
6209 dst_reg->umin_value >>= umax_val; in scalar_min_max_rsh()
6210 dst_reg->umax_value >>= umin_val; in scalar_min_max_rsh()
6223 u64 umin_val = src_reg->u32_min_value; in scalar32_min_max_arsh()
6228 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val); in scalar32_min_max_arsh()
6229 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val); in scalar32_min_max_arsh()
6231 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32); in scalar32_min_max_arsh()
6236 dst_reg->u32_min_value = 0; in scalar32_min_max_arsh()
6237 dst_reg->u32_max_value = U32_MAX; in scalar32_min_max_arsh()
6246 u64 umin_val = src_reg->umin_value; in scalar_min_max_arsh()
6251 dst_reg->smin_value >>= umin_val; in scalar_min_max_arsh()
6252 dst_reg->smax_value >>= umin_val; in scalar_min_max_arsh()
6254 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64); in scalar_min_max_arsh()
6259 dst_reg->umin_value = 0; in scalar_min_max_arsh()
6260 dst_reg->umax_value = U64_MAX; in scalar_min_max_arsh()
6263 * on bits being shifted in from upper 32-bits. Take easy way out in scalar_min_max_arsh()
6270 /* WARNING: This function does calculations on 64-bit values, but the actual
6271 * execution may occur on 32-bit values. Therefore, things like bitshifts
6272 * need extra checks in the 32-bit case.
6280 u8 opcode = BPF_OP(insn->code); in adjust_scalar_min_max_vals()
6286 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; in adjust_scalar_min_max_vals()
6287 u32 dst = insn->dst_reg; in adjust_scalar_min_max_vals()
6289 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); in adjust_scalar_min_max_vals()
6337 * understand and calculate behavior in both 32-bit and 64-bit alu ops. in adjust_scalar_min_max_vals()
6342 * the reg unbounded in the subreg bound space and use the resulting in adjust_scalar_min_max_vals()
6354 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
6364 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
6367 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
6372 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
6377 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
6382 dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
6391 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
6404 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
6417 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
6426 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
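
To make the WARNING at the top of adjust_scalar_min_max_vals() concrete: signed bounds for an alu32 operation cannot be obtained by truncating the 64-bit result, which is one reason the scalar32_* and scalar_* helpers are computed separately. A standalone, illustrative-only sketch compiled outside the kernel shows one such hazard for a register known to hold 1:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t v = 1;	/* register known to hold the constant 1 */

	/* 64-bit view of "v <<= 31": still a positive signed value */
	printf("%lld\n", (long long)(int64_t)(v << 31));	/* 2147483648 */
	/* 32-bit ALU view of the same shift: the sign bit is now set */
	printf("%d\n", (int32_t)((uint32_t)v << 31));		/* -2147483648 */
	return 0;
}
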
6446 struct bpf_verifier_state *vstate = env->cur_state; in adjust_reg_min_max_vals()
6447 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in adjust_reg_min_max_vals()
6448 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; in adjust_reg_min_max_vals()
6450 u8 opcode = BPF_OP(insn->code); in adjust_reg_min_max_vals()
6453 	dst_reg = &regs[insn->dst_reg]; in adjust_reg_min_max_vals()
6455 if (dst_reg->type != SCALAR_VALUE) in adjust_reg_min_max_vals()
6461 dst_reg->id = 0; in adjust_reg_min_max_vals()
6462 if (BPF_SRC(insn->code) == BPF_X) { in adjust_reg_min_max_vals()
6463 		src_reg = &regs[insn->src_reg]; in adjust_reg_min_max_vals()
6464 if (src_reg->type != SCALAR_VALUE) { in adjust_reg_min_max_vals()
6465 if (dst_reg->type != SCALAR_VALUE) { in adjust_reg_min_max_vals()
6470 if (opcode == BPF_SUB && env->allow_ptr_leaks) { in adjust_reg_min_max_vals()
6471 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_reg_min_max_vals()
6475 insn->dst_reg, in adjust_reg_min_max_vals()
6477 return -EACCES; in adjust_reg_min_max_vals()
6483 err = mark_chain_precision(env, insn->dst_reg); in adjust_reg_min_max_vals()
6491 err = mark_chain_precision(env, insn->src_reg); in adjust_reg_min_max_vals()
6498 /* Pretend the src is a reg with a known value, since we only in adjust_reg_min_max_vals()
6502 __mark_reg_known(&off_reg, insn->imm); in adjust_reg_min_max_vals()
6513 return -EINVAL; in adjust_reg_min_max_vals()
6518 return -EINVAL; in adjust_reg_min_max_vals()
6523 /* check validity of 32-bit and 64-bit arithmetic operations */
6527 u8 opcode = BPF_OP(insn->code); in check_alu_op()
6532 if (BPF_SRC(insn->code) != 0 || in check_alu_op()
6533 insn->src_reg != BPF_REG_0 || in check_alu_op()
6534 insn->off != 0 || insn->imm != 0) { in check_alu_op()
6536 return -EINVAL; in check_alu_op()
6539 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || in check_alu_op()
6540 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || in check_alu_op()
6541 BPF_CLASS(insn->code) == BPF_ALU64) { in check_alu_op()
6543 return -EINVAL; in check_alu_op()
6548 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_alu_op()
6552 if (is_pointer_value(env, insn->dst_reg)) { in check_alu_op()
6554 insn->dst_reg); in check_alu_op()
6555 return -EACCES; in check_alu_op()
6559 err = check_reg_arg(env, insn->dst_reg, DST_OP); in check_alu_op()
6565 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
6566 if (insn->imm != 0 || insn->off != 0) { in check_alu_op()
6568 return -EINVAL; in check_alu_op()
6572 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_alu_op()
6576 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { in check_alu_op()
6578 return -EINVAL; in check_alu_op()
6583 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_alu_op()
6587 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
6588 struct bpf_reg_state *src_reg = regs + insn->src_reg; in check_alu_op()
6589 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; in check_alu_op()
6591 if (BPF_CLASS(insn->code) == BPF_ALU64) { in check_alu_op()
6593 * copy register state to dest reg in check_alu_op()
6595 if (src_reg->type == SCALAR_VALUE && !src_reg->id) in check_alu_op()
6600 src_reg->id = ++env->id_gen; in check_alu_op()
6602 dst_reg->live |= REG_LIVE_WRITTEN; in check_alu_op()
6603 dst_reg->subreg_def = DEF_NOT_SUBREG; in check_alu_op()
6606 if (is_pointer_value(env, insn->src_reg)) { in check_alu_op()
6609 insn->src_reg); in check_alu_op()
6610 return -EACCES; in check_alu_op()
6611 } else if (src_reg->type == SCALAR_VALUE) { in check_alu_op()
6617 dst_reg->id = 0; in check_alu_op()
6618 dst_reg->live |= REG_LIVE_WRITTEN; in check_alu_op()
6619 dst_reg->subreg_def = env->insn_idx + 1; in check_alu_op()
6622 insn->dst_reg); in check_alu_op()
6628 * remember the value we stored into this reg in check_alu_op()
6631 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
6632 regs[insn->dst_reg].type = SCALAR_VALUE; in check_alu_op()
6633 if (BPF_CLASS(insn->code) == BPF_ALU64) { in check_alu_op()
6634 __mark_reg_known(regs + insn->dst_reg, in check_alu_op()
6635 insn->imm); in check_alu_op()
6637 __mark_reg_known(regs + insn->dst_reg, in check_alu_op()
6638 (u32)insn->imm); in check_alu_op()
6644 return -EINVAL; in check_alu_op()
6648 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
6649 if (insn->imm != 0 || insn->off != 0) { in check_alu_op()
6651 return -EINVAL; in check_alu_op()
6654 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_alu_op()
6658 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { in check_alu_op()
6660 return -EINVAL; in check_alu_op()
6665 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_alu_op()
6670 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { in check_alu_op()
6672 return -EINVAL; in check_alu_op()
6676 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { in check_alu_op()
6677 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; in check_alu_op()
6679 if (insn->imm < 0 || insn->imm >= size) { in check_alu_op()
6680 verbose(env, "invalid shift %d\n", insn->imm); in check_alu_op()
6681 return -EINVAL; in check_alu_op()
6686 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_alu_op()
6700 struct bpf_reg_state *reg; in __find_good_pkt_pointers() local
6704 reg = &state->regs[i]; in __find_good_pkt_pointers()
6705 if (reg->type == type && reg->id == dst_reg->id) in __find_good_pkt_pointers()
6707 reg->range = max(reg->range, new_range); in __find_good_pkt_pointers()
6710 bpf_for_each_spilled_reg(i, state, reg) { in __find_good_pkt_pointers()
6711 if (!reg) in __find_good_pkt_pointers()
6713 if (reg->type == type && reg->id == dst_reg->id) in __find_good_pkt_pointers()
6714 reg->range = max(reg->range, new_range); in __find_good_pkt_pointers()
6726 if (dst_reg->off < 0 || in find_good_pkt_pointers()
6727 (dst_reg->off == 0 && range_right_open)) in find_good_pkt_pointers()
6731 if (dst_reg->umax_value > MAX_PACKET_OFF || in find_good_pkt_pointers()
6732 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) in find_good_pkt_pointers()
6738 new_range = dst_reg->off; in find_good_pkt_pointers()
6740 new_range--; in find_good_pkt_pointers()
6779 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) in find_good_pkt_pointers()
6780 * and [r3, r3 + 8-1) respectively is safe to access depending on in find_good_pkt_pointers()
6785 * don't care about the other reg's fixed offset, since if it's too big in find_good_pkt_pointers()
6787 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. in find_good_pkt_pointers()
6789 for (i = 0; i <= vstate->curframe; i++) in find_good_pkt_pointers()
6790 __find_good_pkt_pointers(vstate->frame[i], dst_reg, type, in find_good_pkt_pointers()
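/*
 * Illustration (added): the bounds-check pattern that find_good_pkt_pointers()
 * above rewards, written as a minimal stand-alone XDP program. The section
 * name and the 4-byte access size are assumptions of this sketch, not part of
 * verifier.c. After the "data + 4 > data_end" comparison of a packet pointer
 * against the packet-end pointer, the verifier records range = 4 on the
 * packet pointer in the branch where the access is in bounds, so the load
 * below is accepted.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int read_first_word(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	__u32 word;

	/* PTR_TO_PACKET vs PTR_TO_PACKET_END comparison: on the fall-through
	 * path [data, data + 4) is known to be inside the packet.
	 */
	if (data + 4 > data_end)
		return XDP_PASS;

	word = *(__u32 *)data;		/* accepted: range >= 4 on this path */
	return word ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";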
6794 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode) in is_branch32_taken() argument
6796 struct tnum subreg = tnum_subreg(reg->var_off); in is_branch32_taken()
6815 if (reg->u32_min_value > val) in is_branch32_taken()
6817 else if (reg->u32_max_value <= val) in is_branch32_taken()
6821 if (reg->s32_min_value > sval) in is_branch32_taken()
6823 else if (reg->s32_max_value < sval) in is_branch32_taken()
6827 if (reg->u32_max_value < val) in is_branch32_taken()
6829 else if (reg->u32_min_value >= val) in is_branch32_taken()
6833 if (reg->s32_max_value < sval) in is_branch32_taken()
6835 else if (reg->s32_min_value >= sval) in is_branch32_taken()
6839 if (reg->u32_min_value >= val) in is_branch32_taken()
6841 else if (reg->u32_max_value < val) in is_branch32_taken()
6845 if (reg->s32_min_value >= sval) in is_branch32_taken()
6847 else if (reg->s32_max_value < sval) in is_branch32_taken()
6851 if (reg->u32_max_value <= val) in is_branch32_taken()
6853 else if (reg->u32_min_value > val) in is_branch32_taken()
6857 if (reg->s32_max_value <= sval) in is_branch32_taken()
6859 else if (reg->s32_min_value > sval) in is_branch32_taken()
6864 return -1; in is_branch32_taken()
6868 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) in is_branch64_taken() argument
6874 if (tnum_is_const(reg->var_off)) in is_branch64_taken()
6875 return !!tnum_equals_const(reg->var_off, val); in is_branch64_taken()
6878 if (tnum_is_const(reg->var_off)) in is_branch64_taken()
6879 return !tnum_equals_const(reg->var_off, val); in is_branch64_taken()
6882 if ((~reg->var_off.mask & reg->var_off.value) & val) in is_branch64_taken()
6884 if (!((reg->var_off.mask | reg->var_off.value) & val)) in is_branch64_taken()
6888 if (reg->umin_value > val) in is_branch64_taken()
6890 else if (reg->umax_value <= val) in is_branch64_taken()
6894 if (reg->smin_value > sval) in is_branch64_taken()
6896 else if (reg->smax_value < sval) in is_branch64_taken()
6900 if (reg->umax_value < val) in is_branch64_taken()
6902 else if (reg->umin_value >= val) in is_branch64_taken()
6906 if (reg->smax_value < sval) in is_branch64_taken()
6908 else if (reg->smin_value >= sval) in is_branch64_taken()
6912 if (reg->umin_value >= val) in is_branch64_taken()
6914 else if (reg->umax_value < val) in is_branch64_taken()
6918 if (reg->smin_value >= sval) in is_branch64_taken()
6920 else if (reg->smax_value < sval) in is_branch64_taken()
6924 if (reg->umax_value <= val) in is_branch64_taken()
6926 else if (reg->umin_value > val) in is_branch64_taken()
6930 if (reg->smax_value <= sval) in is_branch64_taken()
6932 else if (reg->smin_value > sval) in is_branch64_taken()
6937 return -1; in is_branch64_taken()
6940 /* compute branch direction of the expression "if (reg opcode val) goto target;"
6942 * 1 - branch will be taken and "goto target" will be executed
6943 * 0 - branch will not be taken and fall-through to next insn
6944 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value
6947 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode, in is_branch_taken() argument
6950 if (__is_pointer_value(false, reg)) { in is_branch_taken()
6951 if (!reg_type_not_null(reg->type)) in is_branch_taken()
6952 return -1; in is_branch_taken()
6958 return -1; in is_branch_taken()
6966 return -1; in is_branch_taken()
6971 return is_branch32_taken(reg, val, opcode); in is_branch_taken()
6972 return is_branch64_taken(reg, val, opcode); in is_branch_taken()
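/*
 * Illustration (added): a stand-alone, simplified model of the branch
 * prediction above, covering only the unsigned "reg >= val" (BPF_JGE) case.
 * The struct below is an assumption of this sketch and only tracks the
 * [umin, umax] interval; it is not the kernel's bpf_reg_state.
 */
#include <stdint.h>
#include <stdio.h>

struct scalar_bounds {
	uint64_t umin;
	uint64_t umax;
};

/* 1: branch always taken, 0: never taken, -1: unknown (explore both) */
static int jge_taken(const struct scalar_bounds *r, uint64_t val)
{
	if (r->umin >= val)
		return 1;
	if (r->umax < val)
		return 0;
	return -1;
}

int main(void)
{
	struct scalar_bounds r = { .umin = 16, .umax = 64 };

	printf("%d\n", jge_taken(&r, 8));	/*  1: 16..64 is always >= 8  */
	printf("%d\n", jge_taken(&r, 100));	/*  0: 16..64 is never >= 100 */
	printf("%d\n", jge_taken(&r, 32));	/* -1: depends on runtime     */
	return 0;
}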
6985 struct tnum false_32off = tnum_subreg(false_reg->var_off); in reg_set_min_max()
6986 struct tnum false_64off = false_reg->var_off; in reg_set_min_max()
6987 struct tnum true_32off = tnum_subreg(true_reg->var_off); in reg_set_min_max()
6988 struct tnum true_64off = true_reg->var_off; in reg_set_min_max()
7005 struct bpf_reg_state *reg = in reg_set_min_max() local
7017 __mark_reg32_known(reg, val32); in reg_set_min_max()
7019 ___mark_reg_known(reg, val); in reg_set_min_max()
7039 u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1; in reg_set_min_max()
7042 false_reg->u32_max_value = min(false_reg->u32_max_value, in reg_set_min_max()
7044 true_reg->u32_min_value = max(true_reg->u32_min_value, in reg_set_min_max()
7047 u64 false_umax = opcode == BPF_JGT ? val : val - 1; in reg_set_min_max()
7050 false_reg->umax_value = min(false_reg->umax_value, false_umax); in reg_set_min_max()
7051 true_reg->umin_value = max(true_reg->umin_value, true_umin); in reg_set_min_max()
7059 s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1; in reg_set_min_max()
7062 false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax); in reg_set_min_max()
7063 true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin); in reg_set_min_max()
7065 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1; in reg_set_min_max()
7068 false_reg->smax_value = min(false_reg->smax_value, false_smax); in reg_set_min_max()
7069 true_reg->smin_value = max(true_reg->smin_value, true_smin); in reg_set_min_max()
7078 u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32; in reg_set_min_max()
7080 false_reg->u32_min_value = max(false_reg->u32_min_value, in reg_set_min_max()
7082 true_reg->u32_max_value = min(true_reg->u32_max_value, in reg_set_min_max()
7086 u64 true_umax = opcode == BPF_JLT ? val - 1 : val; in reg_set_min_max()
7088 false_reg->umin_value = max(false_reg->umin_value, false_umin); in reg_set_min_max()
7089 true_reg->umax_value = min(true_reg->umax_value, true_umax); in reg_set_min_max()
7098 s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32; in reg_set_min_max()
7100 false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin); in reg_set_min_max()
7101 true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax); in reg_set_min_max()
7104 s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval; in reg_set_min_max()
7106 false_reg->smin_value = max(false_reg->smin_value, false_smin); in reg_set_min_max()
7107 true_reg->smax_value = min(true_reg->smax_value, true_smax); in reg_set_min_max()
7116 false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off), in reg_set_min_max()
7118 true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off), in reg_set_min_max()
7123 false_reg->var_off = false_64off; in reg_set_min_max()
7124 true_reg->var_off = true_64off; in reg_set_min_max()
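/*
 * Illustration (added): simplified bound narrowing for the unsigned BPF_JGT
 * case handled above. For "if (r1 > val)", the fall-through copy of r1 can
 * clamp its umax to val and the taken copy can raise its umin to val + 1.
 * The types here are local to this sketch, not the kernel structures.
 */
#include <stdint.h>
#include <stdio.h>

struct ubounds { uint64_t umin, umax; };

static void jgt_narrow(struct ubounds *false_r, struct ubounds *true_r,
		       uint64_t val)
{
	if (false_r->umax > val)		/* false branch: r1 <= val     */
		false_r->umax = val;
	if (true_r->umin < val + 1)		/* true branch:  r1 >= val + 1 */
		true_r->umin = val + 1;
}

int main(void)
{
	struct ubounds f = { 0, 100 }, t = { 0, 100 };

	jgt_narrow(&f, &t, 10);	/* models "if (r1 > 10)" with r1 in [0, 100] */
	printf("false branch: [%llu, %llu]\n",			/* [0, 10]   */
	       (unsigned long long)f.umin, (unsigned long long)f.umax);
	printf("true branch:  [%llu, %llu]\n",			/* [11, 100] */
	       (unsigned long long)t.umin, (unsigned long long)t.umax);
	return 0;
}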
7131 * the variable reg.
7166 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, in __reg_combine_min_max()
7167 dst_reg->umin_value); in __reg_combine_min_max()
7168 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, in __reg_combine_min_max()
7169 dst_reg->umax_value); in __reg_combine_min_max()
7170 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, in __reg_combine_min_max()
7171 dst_reg->smin_value); in __reg_combine_min_max()
7172 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, in __reg_combine_min_max()
7173 dst_reg->smax_value); in __reg_combine_min_max()
7174 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, in __reg_combine_min_max()
7175 dst_reg->var_off); in __reg_combine_min_max()
7210 struct bpf_reg_state *reg, u32 id, in mark_ptr_or_null_reg() argument
7213 if (reg_type_may_be_null(reg->type) && reg->id == id && in mark_ptr_or_null_reg()
7214 !WARN_ON_ONCE(!reg->id)) { in mark_ptr_or_null_reg()
7216 * have been known-zero, because we don't allow pointer in mark_ptr_or_null_reg()
7219 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || in mark_ptr_or_null_reg()
7220 !tnum_equals_const(reg->var_off, 0) || in mark_ptr_or_null_reg()
7221 reg->off)) { in mark_ptr_or_null_reg()
7222 __mark_reg_known_zero(reg); in mark_ptr_or_null_reg()
7223 reg->off = 0; in mark_ptr_or_null_reg()
7226 reg->type = SCALAR_VALUE; in mark_ptr_or_null_reg()
7227 } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) { in mark_ptr_or_null_reg()
7228 const struct bpf_map *map = reg->map_ptr; in mark_ptr_or_null_reg()
7230 if (map->inner_map_meta) { in mark_ptr_or_null_reg()
7231 reg->type = CONST_PTR_TO_MAP; in mark_ptr_or_null_reg()
7232 reg->map_ptr = map->inner_map_meta; in mark_ptr_or_null_reg()
7233 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { in mark_ptr_or_null_reg()
7234 reg->type = PTR_TO_XDP_SOCK; in mark_ptr_or_null_reg()
7235 } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || in mark_ptr_or_null_reg()
7236 map->map_type == BPF_MAP_TYPE_SOCKHASH) { in mark_ptr_or_null_reg()
7237 reg->type = PTR_TO_SOCKET; in mark_ptr_or_null_reg()
7239 reg->type = PTR_TO_MAP_VALUE; in mark_ptr_or_null_reg()
7241 } else if (reg->type == PTR_TO_SOCKET_OR_NULL) { in mark_ptr_or_null_reg()
7242 reg->type = PTR_TO_SOCKET; in mark_ptr_or_null_reg()
7243 } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) { in mark_ptr_or_null_reg()
7244 reg->type = PTR_TO_SOCK_COMMON; in mark_ptr_or_null_reg()
7245 } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) { in mark_ptr_or_null_reg()
7246 reg->type = PTR_TO_TCP_SOCK; in mark_ptr_or_null_reg()
7247 } else if (reg->type == PTR_TO_BTF_ID_OR_NULL) { in mark_ptr_or_null_reg()
7248 reg->type = PTR_TO_BTF_ID; in mark_ptr_or_null_reg()
7249 } else if (reg->type == PTR_TO_MEM_OR_NULL) { in mark_ptr_or_null_reg()
7250 reg->type = PTR_TO_MEM; in mark_ptr_or_null_reg()
7251 } else if (reg->type == PTR_TO_RDONLY_BUF_OR_NULL) { in mark_ptr_or_null_reg()
7252 reg->type = PTR_TO_RDONLY_BUF; in mark_ptr_or_null_reg()
7253 } else if (reg->type == PTR_TO_RDWR_BUF_OR_NULL) { in mark_ptr_or_null_reg()
7254 reg->type = PTR_TO_RDWR_BUF; in mark_ptr_or_null_reg()
7261 reg->id = 0; in mark_ptr_or_null_reg()
7262 reg->ref_obj_id = 0; in mark_ptr_or_null_reg()
7263 } else if (!reg_may_point_to_spin_lock(reg)) { in mark_ptr_or_null_reg()
7264 /* For not-NULL ptr, reg->ref_obj_id will be reset in mark_ptr_or_null_reg()
7267 * reg->id is still used by spin_lock ptr. Other in mark_ptr_or_null_reg()
7268 * than spin_lock ptr type, reg->id can be reset. in mark_ptr_or_null_reg()
7270 reg->id = 0; in mark_ptr_or_null_reg()
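/*
 * Illustration (added): the NULL-check pattern that drives the type
 * conversion above, as a minimal stand-alone BPF C program. The map name,
 * layout and program section are assumptions of this sketch. The lookup
 * result starts out as PTR_TO_MAP_VALUE_OR_NULL; in the branch where it is
 * proven non-NULL it becomes PTR_TO_MAP_VALUE and may be dereferenced.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

SEC("xdp")
int bump_counter(struct xdp_md *ctx)
{
	__u32 key = 0;
	__u64 *val;

	val = bpf_map_lookup_elem(&counters, &key);
	if (!val)			/* NULL branch: value must not be used */
		return XDP_PASS;

	__sync_fetch_and_add(val, 1);	/* non-NULL branch: access is allowed */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";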
7278 struct bpf_reg_state *reg; in __mark_ptr_or_null_regs() local
7282 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null); in __mark_ptr_or_null_regs()
7284 bpf_for_each_spilled_reg(i, state, reg) { in __mark_ptr_or_null_regs()
7285 if (!reg) in __mark_ptr_or_null_regs()
7287 mark_ptr_or_null_reg(state, reg, id, is_null); in __mark_ptr_or_null_regs()
7297 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in mark_ptr_or_null_regs()
7298 struct bpf_reg_state *regs = state->regs; in mark_ptr_or_null_regs()
7310 for (i = 0; i <= vstate->curframe; i++) in mark_ptr_or_null_regs()
7311 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null); in mark_ptr_or_null_regs()
7320 if (BPF_SRC(insn->code) != BPF_X) in try_match_pkt_pointers()
7323 /* Pointers are always 64-bit. */ in try_match_pkt_pointers()
7324 if (BPF_CLASS(insn->code) == BPF_JMP32) in try_match_pkt_pointers()
7327 switch (BPF_OP(insn->code)) { in try_match_pkt_pointers()
7329 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
7330 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
7331 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
7335 dst_reg->type, false); in try_match_pkt_pointers()
7336 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
7337 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
7339 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
7342 src_reg->type, true); in try_match_pkt_pointers()
7348 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
7349 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
7350 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
7354 dst_reg->type, true); in try_match_pkt_pointers()
7355 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
7356 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
7358 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
7361 src_reg->type, false); in try_match_pkt_pointers()
7367 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
7368 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
7369 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
7373 dst_reg->type, true); in try_match_pkt_pointers()
7374 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
7375 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
7377 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
7380 src_reg->type, false); in try_match_pkt_pointers()
7386 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
7387 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
7388 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
7392 dst_reg->type, false); in try_match_pkt_pointers()
7393 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
7394 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
7396 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
7399 src_reg->type, true); in try_match_pkt_pointers()
7415 struct bpf_reg_state *reg; in find_equal_scalars() local
7418 for (i = 0; i <= vstate->curframe; i++) { in find_equal_scalars()
7419 state = vstate->frame[i]; in find_equal_scalars()
7421 reg = &state->regs[j]; in find_equal_scalars()
7422 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) in find_equal_scalars()
7423 *reg = *known_reg; in find_equal_scalars()
7426 bpf_for_each_spilled_reg(j, state, reg) { in find_equal_scalars()
7427 if (!reg) in find_equal_scalars()
7429 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) in find_equal_scalars()
7430 *reg = *known_reg; in find_equal_scalars()
7438 struct bpf_verifier_state *this_branch = env->cur_state; in check_cond_jmp_op()
7440 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; in check_cond_jmp_op()
7442 u8 opcode = BPF_OP(insn->code); in check_cond_jmp_op()
7444 int pred = -1; in check_cond_jmp_op()
7450 return -EINVAL; in check_cond_jmp_op()
7453 if (BPF_SRC(insn->code) == BPF_X) { in check_cond_jmp_op()
7454 if (insn->imm != 0) { in check_cond_jmp_op()
7456 return -EINVAL; in check_cond_jmp_op()
7460 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_cond_jmp_op()
7464 if (is_pointer_value(env, insn->src_reg)) { in check_cond_jmp_op()
7466 insn->src_reg); in check_cond_jmp_op()
7467 return -EACCES; in check_cond_jmp_op()
7469                 src_reg = &regs[insn->src_reg];  in check_cond_jmp_op()
7471 if (insn->src_reg != BPF_REG_0) { in check_cond_jmp_op()
7473 return -EINVAL; in check_cond_jmp_op()
7478 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_cond_jmp_op()
7482         dst_reg = &regs[insn->dst_reg];  in check_cond_jmp_op()
7483 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; in check_cond_jmp_op()
7485 if (BPF_SRC(insn->code) == BPF_K) { in check_cond_jmp_op()
7486 pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32); in check_cond_jmp_op()
7487 } else if (src_reg->type == SCALAR_VALUE && in check_cond_jmp_op()
7488 is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) { in check_cond_jmp_op()
7490 tnum_subreg(src_reg->var_off).value, in check_cond_jmp_op()
7493 } else if (src_reg->type == SCALAR_VALUE && in check_cond_jmp_op()
7494 !is_jmp32 && tnum_is_const(src_reg->var_off)) { in check_cond_jmp_op()
7496 src_reg->var_off.value, in check_cond_jmp_op()
7506 err = mark_chain_precision(env, insn->dst_reg); in check_cond_jmp_op()
7507 if (BPF_SRC(insn->code) == BPF_X && !err) in check_cond_jmp_op()
7508 err = mark_chain_precision(env, insn->src_reg); in check_cond_jmp_op()
7513 /* only follow the goto, ignore fall-through */ in check_cond_jmp_op()
7514 *insn_idx += insn->off; in check_cond_jmp_op()
7517 /* only follow fall-through branch, since in check_cond_jmp_op()
7523 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, in check_cond_jmp_op()
7526 return -EFAULT; in check_cond_jmp_op()
7527 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; in check_cond_jmp_op()
7536 if (BPF_SRC(insn->code) == BPF_X) { in check_cond_jmp_op()
7537                 struct bpf_reg_state *src_reg = &regs[insn->src_reg];  in check_cond_jmp_op()
7539 if (dst_reg->type == SCALAR_VALUE && in check_cond_jmp_op()
7540 src_reg->type == SCALAR_VALUE) { in check_cond_jmp_op()
7541 if (tnum_is_const(src_reg->var_off) || in check_cond_jmp_op()
7543 tnum_is_const(tnum_subreg(src_reg->var_off)))) in check_cond_jmp_op()
7544 reg_set_min_max(&other_branch_regs[insn->dst_reg], in check_cond_jmp_op()
7546 src_reg->var_off.value, in check_cond_jmp_op()
7547 tnum_subreg(src_reg->var_off).value, in check_cond_jmp_op()
7549 else if (tnum_is_const(dst_reg->var_off) || in check_cond_jmp_op()
7551 tnum_is_const(tnum_subreg(dst_reg->var_off)))) in check_cond_jmp_op()
7552 reg_set_min_max_inv(&other_branch_regs[insn->src_reg], in check_cond_jmp_op()
7554 dst_reg->var_off.value, in check_cond_jmp_op()
7555 tnum_subreg(dst_reg->var_off).value, in check_cond_jmp_op()
7560 reg_combine_min_max(&other_branch_regs[insn->src_reg], in check_cond_jmp_op()
7561 &other_branch_regs[insn->dst_reg], in check_cond_jmp_op()
7563 if (src_reg->id && in check_cond_jmp_op()
7564 !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) { in check_cond_jmp_op()
7566 find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]); in check_cond_jmp_op()
7570 } else if (dst_reg->type == SCALAR_VALUE) { in check_cond_jmp_op()
7571 reg_set_min_max(&other_branch_regs[insn->dst_reg], in check_cond_jmp_op()
7572 dst_reg, insn->imm, (u32)insn->imm, in check_cond_jmp_op()
7576 if (dst_reg->type == SCALAR_VALUE && dst_reg->id && in check_cond_jmp_op()
7577 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) { in check_cond_jmp_op()
7579 find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]); in check_cond_jmp_op()
7586 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && in check_cond_jmp_op()
7587 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && in check_cond_jmp_op()
7588 reg_type_may_be_null(dst_reg->type)) { in check_cond_jmp_op()
7592 mark_ptr_or_null_regs(this_branch, insn->dst_reg, in check_cond_jmp_op()
7594 mark_ptr_or_null_regs(other_branch, insn->dst_reg, in check_cond_jmp_op()
7596         } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],  in check_cond_jmp_op()
7598 is_pointer_value(env, insn->dst_reg)) { in check_cond_jmp_op()
7600 insn->dst_reg); in check_cond_jmp_op()
7601 return -EACCES; in check_cond_jmp_op()
7603 if (env->log.level & BPF_LOG_LEVEL) in check_cond_jmp_op()
7604 print_verifier_state(env, this_branch->frame[this_branch->curframe]); in check_cond_jmp_op()
7617 if (BPF_SIZE(insn->code) != BPF_DW) { in check_ld_imm()
7619 return -EINVAL; in check_ld_imm()
7621 if (insn->off != 0) { in check_ld_imm()
7623 return -EINVAL; in check_ld_imm()
7626 err = check_reg_arg(env, insn->dst_reg, DST_OP); in check_ld_imm()
7630         dst_reg = &regs[insn->dst_reg];  in check_ld_imm()
7631 if (insn->src_reg == 0) { in check_ld_imm()
7632 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; in check_ld_imm()
7634 dst_reg->type = SCALAR_VALUE; in check_ld_imm()
7635                 __mark_reg_known(&regs[insn->dst_reg], imm);  in check_ld_imm()
7639 if (insn->src_reg == BPF_PSEUDO_BTF_ID) { in check_ld_imm()
7640 mark_reg_known_zero(env, regs, insn->dst_reg); in check_ld_imm()
7642 dst_reg->type = aux->btf_var.reg_type; in check_ld_imm()
7643 switch (dst_reg->type) { in check_ld_imm()
7645 dst_reg->mem_size = aux->btf_var.mem_size; in check_ld_imm()
7649 dst_reg->btf_id = aux->btf_var.btf_id; in check_ld_imm()
7653 return -EFAULT; in check_ld_imm()
7658 map = env->used_maps[aux->map_index]; in check_ld_imm()
7659 mark_reg_known_zero(env, regs, insn->dst_reg); in check_ld_imm()
7660 dst_reg->map_ptr = map; in check_ld_imm()
7662 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) { in check_ld_imm()
7663 dst_reg->type = PTR_TO_MAP_VALUE; in check_ld_imm()
7664 dst_reg->off = aux->map_off; in check_ld_imm()
7666 dst_reg->id = ++env->id_gen; in check_ld_imm()
7667 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) { in check_ld_imm()
7668 dst_reg->type = CONST_PTR_TO_MAP; in check_ld_imm()
7671 return -EINVAL; in check_ld_imm()
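/*
 * Illustration (added): how the two-slot BPF_LD | BPF_IMM | BPF_DW encoding
 * carries a 64-bit constant, mirroring the imm reconstruction above. The two
 * 32-bit imm fields come from consecutive instruction slots. Stand-alone
 * sketch with made-up values, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* imm fields of insn[0] and insn[1] for "r1 = 0x1122334455667788" */
	int32_t imm_lo = (int32_t)0x55667788;	/* insn[0].imm: low 32 bits  */
	int32_t imm_hi = (int32_t)0x11223344;	/* insn[1].imm: high 32 bits */

	uint64_t imm64 = ((uint64_t)imm_hi << 32) | (uint32_t)imm_lo;

	printf("0x%llx\n", (unsigned long long)imm64);	/* 0x1122334455667788 */
	return 0;
}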
7690 * - they can only appear in the programs where ctx == skb
7691 * - since they are wrappers of function calls, they scratch R1-R5 registers,
7692 * preserve R6-R9, and store return value into R0
7699 * IMM == 32-bit immediate
7702 * R0 - 8/16/32-bit skb data converted to cpu endianness
7708 u8 mode = BPF_MODE(insn->code); in check_ld_abs()
7711 if (!may_access_skb(resolve_prog_type(env->prog))) { in check_ld_abs()
7713 return -EINVAL; in check_ld_abs()
7716 if (!env->ops->gen_ld_abs) { in check_ld_abs()
7718 return -EINVAL; in check_ld_abs()
7721 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || in check_ld_abs()
7722 BPF_SIZE(insn->code) == BPF_DW || in check_ld_abs()
7723 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { in check_ld_abs()
7725 return -EINVAL; in check_ld_abs()
7743 if (env->cur_state->active_spin_lock) { in check_ld_abs()
7744 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n"); in check_ld_abs()
7745 return -EINVAL; in check_ld_abs()
7751 return -EINVAL; in check_ld_abs()
7756 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_ld_abs()
7776 /* ld_abs load up to 32-bit skb data. */ in check_ld_abs()
7777 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; in check_ld_abs()
7784 const struct bpf_prog *prog = env->prog; in check_return_code()
7785 struct bpf_reg_state *reg; in check_return_code() local
7787 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_return_code()
7789 const bool is_subprog = env->cur_state->frame[0]->subprogno; in check_return_code()
7791 /* LSM and struct_ops func-ptr's return type could be "void" */ in check_return_code()
7795 !prog->aux->attach_func_proto->type) in check_return_code()
7810 return -EACCES; in check_return_code()
7813 reg = cur_regs(env) + BPF_REG_0; in check_return_code()
7815 if (reg->type != SCALAR_VALUE) { in check_return_code()
7817 reg_type_str[reg->type]); in check_return_code()
7818 return -EINVAL; in check_return_code()
7825 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || in check_return_code()
7826 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || in check_return_code()
7827 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || in check_return_code()
7828 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || in check_return_code()
7829 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || in check_return_code()
7830 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME) in check_return_code()
7834 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { in check_return_code()
7846 if (!env->prog->aux->attach_btf_id) in check_return_code()
7851 switch (env->prog->expected_attach_type) { in check_return_code()
7862 return -ENOTSUPP; in check_return_code()
7870 * depends on the to-be-replaced kernel func or bpf program. in check_return_code()
7876 if (reg->type != SCALAR_VALUE) { in check_return_code()
7878 reg_type_str[reg->type]); in check_return_code()
7879 return -EINVAL; in check_return_code()
7882 if (!tnum_in(range, reg->var_off)) { in check_return_code()
7886 if (!tnum_is_unknown(reg->var_off)) { in check_return_code()
7887 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_return_code()
7894 return -EINVAL; in check_return_code()
7898 tnum_in(enforce_attach_type_range, reg->var_off)) in check_return_code()
7899 env->prog->enforce_expected_attach_type = 1; in check_return_code()
7903 /* non-recursive DFS pseudo code
7904 * 1 procedure DFS-iterative(G,v):
7909 * 6 t <- S.pop()
7915 * 12 w <- G.adjacentVertex(t,e)
7917 * 14 label e as tree-edge
7922 * 19 label e as back-edge
7925 * 22 label e as forward- or cross-edge
7930 * 0x10 - discovered
7931 * 0x11 - discovered and fall-through edge labelled
7932 * 0x12 - discovered and fall-through and branch edges labelled
7933 * 0x20 - explored
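/*
 * Illustration (added): a stand-alone version of the iterative DFS sketched
 * in the pseudo code above, using the same 0x10/0x20-style markings to find
 * back-edges (loops). The tiny four-"insn" graph and the array sizes are
 * assumptions of this sketch; it is not the kernel's push_insn()/check_cfg().
 */
#include <stdio.h>

#define DISCOVERED	0x10
#define EXPLORED	0x20
#define FALLTHROUGH	1	/* fall-through edge already labelled */
#define BRANCHED	2	/* branch edge already labelled       */

#define N 4

/* succ[i][0] = fall-through target, succ[i][1] = branch target, -1 = none */
static const int succ[N][2] = {
	{ 1, -1 },	/* 0 -> 1                           */
	{ 2,  3 },	/* 1 -> 2, 1 -> 3                   */
	{ 3,  1 },	/* 2 -> 3, 2 -> 1 (back-edge: loop) */
	{ -1, -1 },	/* 3: exit                          */
};

int main(void)
{
	int insn_state[N] = { 0 }, insn_stack[N], cur_stack = 0;
	int loops = 0;

	insn_state[0] = DISCOVERED;
	insn_stack[cur_stack++] = 0;

	while (cur_stack) {
		int t = insn_stack[cur_stack - 1], e = -1, w;

		if (!(insn_state[t] & FALLTHROUGH))
			e = 0;
		else if (!(insn_state[t] & BRANCHED))
			e = 1;

		if (e < 0) {
			/* both edges labelled: node fully explored, pop it */
			insn_state[t] = EXPLORED;
			cur_stack--;
			continue;
		}

		insn_state[t] |= (e == 0) ? FALLTHROUGH : BRANCHED;
		w = succ[t][e];
		if (w < 0)
			continue;			/* no such edge       */
		if (insn_state[w] == 0) {
			insn_state[w] = DISCOVERED;	/* tree-edge: descend */
			insn_stack[cur_stack++] = w;
		} else if (insn_state[w] & DISCOVERED) {
			/* target is still on the stack: back-edge == loop */
			printf("back-edge from insn %d to %d\n", t, w);
			loops++;
		}
		/* EXPLORED target: forward- or cross-edge, nothing to do */
	}

	printf("loops found: %d\n", loops);
	return 0;
}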
7945 return env->prog->len; in state_htab_size()
7952 struct bpf_verifier_state *cur = env->cur_state; in explored_state()
7953 struct bpf_func_state *state = cur->frame[cur->curframe]; in explored_state()
7955 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; in explored_state()
7960 env->insn_aux_data[idx].prune_point = true; in init_explored_state()
7963 /* t, w, e - match pseudo-code above:
7964 * t - index of current instruction
7965 * w - next instruction
7966 * e - edge
7971 int *insn_stack = env->cfg.insn_stack; in push_insn()
7972 int *insn_state = env->cfg.insn_state; in push_insn()
7980 if (w < 0 || w >= env->prog->len) { in push_insn()
7983 return -EINVAL; in push_insn()
7991 /* tree-edge */ in push_insn()
7994 if (env->cfg.cur_stack >= env->prog->len) in push_insn()
7995 return -E2BIG; in push_insn()
7996 insn_stack[env->cfg.cur_stack++] = w; in push_insn()
7999 if (loop_ok && env->bpf_capable) in push_insn()
8003 verbose(env, "back-edge from insn %d to %d\n", t, w); in push_insn()
8004 return -EINVAL; in push_insn()
8006 /* forward- or cross-edge */ in push_insn()
8010 return -EFAULT; in push_insn()
8015 /* non-recursive depth-first-search to detect loops in BPF program
8016 * loop == back-edge in directed graph
8020 struct bpf_insn *insns = env->prog->insnsi; in check_cfg()
8021 int insn_cnt = env->prog->len; in check_cfg()
8026 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); in check_cfg()
8028 return -ENOMEM; in check_cfg()
8030 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); in check_cfg()
8033 return -ENOMEM; in check_cfg()
8038 env->cfg.cur_stack = 1; in check_cfg()
8041 if (env->cfg.cur_stack == 0) in check_cfg()
8043 t = insn_stack[env->cfg.cur_stack - 1]; in check_cfg()
8070 ret = -EINVAL; in check_cfg()
8106 /* all other non-branch instructions with single in check_cfg()
8107 * fall-through edge in check_cfg()
8118 if (env->cfg.cur_stack-- <= 0) { in check_cfg()
8120 ret = -EFAULT; in check_cfg()
8129 ret = -EINVAL; in check_cfg()
8138 env->cfg.insn_state = env->cfg.insn_stack = NULL; in check_cfg()
8146 for (i = 1; i < env->subprog_cnt; i++) { in check_abnormal_return()
8147 if (env->subprog_info[i].has_ld_abs) { in check_abnormal_return()
8149 return -EINVAL; in check_abnormal_return()
8151 if (env->subprog_info[i].has_tail_call) { in check_abnormal_return()
8153 return -EINVAL; in check_abnormal_return()
8177 int ret = -ENOMEM; in check_btf_func()
8179 nfuncs = attr->func_info_cnt; in check_btf_func()
8182 return -EINVAL; in check_btf_func()
8186 if (nfuncs != env->subprog_cnt) { in check_btf_func()
8188 return -EINVAL; in check_btf_func()
8191 urec_size = attr->func_info_rec_size; in check_btf_func()
8196 return -EINVAL; in check_btf_func()
8199 prog = env->prog; in check_btf_func()
8200 btf = prog->aux->btf; in check_btf_func()
8202 urecord = u64_to_user_ptr(attr->func_info); in check_btf_func()
8207 return -ENOMEM; in check_btf_func()
8215 if (ret == -E2BIG) { in check_btf_func()
8220 if (put_user(min_size, &uattr->func_info_rec_size)) in check_btf_func()
8221 ret = -EFAULT; in check_btf_func()
8227 ret = -EFAULT; in check_btf_func()
8232 ret = -EINVAL; in check_btf_func()
8247 if (env->subprog_info[i].start != krecord[i].insn_off) { in check_btf_func()
8259 info_aux[i].linkage = BTF_INFO_VLEN(type->info); in check_btf_func()
8261 func_proto = btf_type_by_id(btf, type->type); in check_btf_func()
8265 ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL); in check_btf_func()
8268 if (i && !scalar_return && env->subprog_info[i].has_ld_abs) { in check_btf_func()
8272 if (i && !scalar_return && env->subprog_info[i].has_tail_call) { in check_btf_func()
8281 prog->aux->func_info = krecord; in check_btf_func()
8282 prog->aux->func_info_cnt = nfuncs; in check_btf_func()
8283 prog->aux->func_info_aux = info_aux; in check_btf_func()
8294 struct bpf_prog_aux *aux = env->prog->aux; in adjust_btf_func()
8297 if (!aux->func_info) in adjust_btf_func()
8300 for (i = 0; i < env->subprog_cnt; i++) in adjust_btf_func()
8301 aux->func_info[i].insn_off = env->subprog_info[i].start; in adjust_btf_func()
8305 sizeof(((struct bpf_line_info *)(0))->line_col))
8320 nr_linfo = attr->line_info_cnt; in check_btf_line()
8324 rec_size = attr->line_info_rec_size; in check_btf_line()
8327 rec_size & (sizeof(u32) - 1)) in check_btf_line()
8328 return -EINVAL; in check_btf_line()
8336 return -ENOMEM; in check_btf_line()
8338 prog = env->prog; in check_btf_line()
8339 btf = prog->aux->btf; in check_btf_line()
8342 sub = env->subprog_info; in check_btf_line()
8343 ulinfo = u64_to_user_ptr(attr->line_info); in check_btf_line()
8349 if (err == -E2BIG) { in check_btf_line()
8352 &uattr->line_info_rec_size)) in check_btf_line()
8353 err = -EFAULT; in check_btf_line()
8359 err = -EFAULT; in check_btf_line()
8366 * 2) bounded by prog->len in check_btf_line()
8375 linfo[i].insn_off >= prog->len) { in check_btf_line()
8376 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", in check_btf_line()
8378 prog->len); in check_btf_line()
8379 err = -EINVAL; in check_btf_line()
8383 if (!prog->insnsi[linfo[i].insn_off].code) { in check_btf_line()
8387 err = -EINVAL; in check_btf_line()
8394 err = -EINVAL; in check_btf_line()
8398 if (s != env->subprog_cnt) { in check_btf_line()
8404 err = -EINVAL; in check_btf_line()
8413 if (s != env->subprog_cnt) { in check_btf_line()
8415 env->subprog_cnt - s, s); in check_btf_line()
8416 err = -EINVAL; in check_btf_line()
8420 prog->aux->linfo = linfo; in check_btf_line()
8421 prog->aux->nr_linfo = nr_linfo; in check_btf_line()
8437 if (!attr->func_info_cnt && !attr->line_info_cnt) { in check_btf_info()
8439 return -EINVAL; in check_btf_info()
8443 btf = btf_get_by_fd(attr->prog_btf_fd); in check_btf_info()
8446 env->prog->aux->btf = btf; in check_btf_info()
8463 return old->umin_value <= cur->umin_value && in range_within()
8464 old->umax_value >= cur->umax_value && in range_within()
8465 old->smin_value <= cur->smin_value && in range_within()
8466 old->smax_value >= cur->smax_value; in range_within()
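/*
 * Illustration (added): range_within() in isolation. State pruning can treat
 * the current scalar as safe when the previously verified ("old") register
 * covered at least as many values, i.e. when the old range is a superset of
 * the current one. The struct is a local simplification for this sketch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct scalar_range {
	uint64_t umin, umax;
	int64_t  smin, smax;
};

static bool range_within(const struct scalar_range *old,
			 const struct scalar_range *cur)
{
	return old->umin <= cur->umin && old->umax >= cur->umax &&
	       old->smin <= cur->smin && old->smax >= cur->smax;
}

int main(void)
{
	struct scalar_range old = { .umin = 0, .umax = 100, .smin = 0, .smax = 100 };
	struct scalar_range cur = { .umin = 5, .umax = 10,  .smin = 5, .smax = 10  };

	/* true: everything proven safe for [0, 100] also holds for [5, 10] */
	printf("%s\n", range_within(&old, &cur) ? "prunable" : "not prunable");
	return 0;
}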
8479 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
8512 live = st->regs[i].live; in clean_func_state()
8514 st->regs[i].live |= REG_LIVE_DONE; in clean_func_state()
8519 __mark_reg_not_init(env, &st->regs[i]); in clean_func_state()
8522 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { in clean_func_state()
8523 live = st->stack[i].spilled_ptr.live; in clean_func_state()
8525 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; in clean_func_state()
8527 __mark_reg_not_init(env, &st->stack[i].spilled_ptr); in clean_func_state()
8529 st->stack[i].slot_type[j] = STACK_INVALID; in clean_func_state()
8539 if (st->frame[0]->regs[0].live & REG_LIVE_DONE) in clean_verifier_state()
8543 for (i = 0; i <= st->curframe; i++) in clean_verifier_state()
8544 clean_func_state(env, st->frame[i]); in clean_verifier_state()
8587 if (sl->state.branches) in clean_live_states()
8589 if (sl->state.insn_idx != insn || in clean_live_states()
8590 sl->state.curframe != cur->curframe) in clean_live_states()
8592 for (i = 0; i <= cur->curframe; i++) in clean_live_states()
8593 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) in clean_live_states()
8595 clean_verifier_state(env, &sl->state); in clean_live_states()
8597 sl = sl->next; in clean_live_states()
8607 if (!(rold->live & REG_LIVE_READ)) in regsafe()
8613 if (rold->type == PTR_TO_STACK) in regsafe()
8615 * the same stack frame, since fp-8 in foo != fp-8 in bar in regsafe()
8617 return equal && rold->frameno == rcur->frameno; in regsafe()
8622 if (rold->type == NOT_INIT) in regsafe()
8625 if (rcur->type == NOT_INIT) in regsafe()
8627 switch (rold->type) { in regsafe()
8629 if (rcur->type == SCALAR_VALUE) { in regsafe()
8630 if (!rold->precise && !rcur->precise) in regsafe()
8634 tnum_in(rold->var_off, rcur->var_off); in regsafe()
8656 tnum_in(rold->var_off, rcur->var_off); in regsafe()
8660 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- in regsafe()
8665 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) in regsafe()
8670 return check_ids(rold->id, rcur->id, idmap); in regsafe()
8673 if (rcur->type != rold->type) in regsafe()
8678 * since someone could have accessed through (ptr - k), or in regsafe()
8679 * even done ptr -= k in a register, to get a safe access. in regsafe()
8681 if (rold->range > rcur->range) in regsafe()
8686 if (rold->off != rcur->off) in regsafe()
8689 if (rold->id && !check_ids(rold->id, rcur->id, idmap)) in regsafe()
8693 tnum_in(rold->var_off, rcur->var_off); in regsafe()
8728 for (i = 0; i < old->allocated_stack; i++) { in stacksafe()
8731 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) { in stacksafe()
8732 i += BPF_REG_SIZE - 1; in stacksafe()
8737 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) in stacksafe()
8743 if (i >= cur->allocated_stack) in stacksafe()
8747 * it will be safe with zero-initialized stack. in stacksafe()
8750 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && in stacksafe()
8751 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) in stacksafe()
8753 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != in stacksafe()
8754 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) in stacksafe()
8756 * this stack slot, but current has STACK_MISC -> in stacksafe()
8763 if (old->stack[spi].slot_type[0] != STACK_SPILL) in stacksafe()
8765 if (!regsafe(&old->stack[spi].spilled_ptr, in stacksafe()
8766 &cur->stack[spi].spilled_ptr, in stacksafe()
8772 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} in stacksafe()
8774 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} in stacksafe()
8785 if (old->acquired_refs != cur->acquired_refs) in refsafe()
8787 return !memcmp(old->refs, cur->refs, in refsafe()
8788 sizeof(*old->refs) * old->acquired_refs); in refsafe()
8830 if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) in func_states_equal()
8851 if (old->curframe != cur->curframe) in states_equal()
8855 * must never prune a non-speculative execution one. in states_equal()
8857 if (old->speculative && !cur->speculative) in states_equal()
8860 if (old->active_spin_lock != cur->active_spin_lock) in states_equal()
8866 for (i = 0; i <= old->curframe; i++) { in states_equal()
8867 if (old->frame[i]->callsite != cur->frame[i]->callsite) in states_equal()
8869 if (!func_states_equal(old->frame[i], cur->frame[i])) in states_equal()
8879 struct bpf_reg_state *reg, in propagate_liveness_reg() argument
8882 u8 parent_flag = parent_reg->live & REG_LIVE_READ; in propagate_liveness_reg()
8883 u8 flag = reg->live & REG_LIVE_READ; in propagate_liveness_reg()
8886 /* When comes here, read flags of PARENT_REG or REG could be any of in propagate_liveness_reg()
8891 /* Or if there is no read flag from REG. */ in propagate_liveness_reg()
8893 /* Or if the read flag from REG is the same as PARENT_REG. */ in propagate_liveness_reg()
8897 err = mark_reg_read(env, reg, parent_reg, flag); in propagate_liveness_reg()
8905 * straight-line code between a state and its parent. When we arrive at an
8906 * equivalent state (jump target or such) we didn't arrive by the straight-line
8908 * of the state's write marks. That's what 'parent == state->parent' comparison
8919 if (vparent->curframe != vstate->curframe) { in propagate_liveness()
8921 vparent->curframe, vstate->curframe); in propagate_liveness()
8922 return -EFAULT; in propagate_liveness()
8926 for (frame = 0; frame <= vstate->curframe; frame++) { in propagate_liveness()
8927 parent = vparent->frame[frame]; in propagate_liveness()
8928 state = vstate->frame[frame]; in propagate_liveness()
8929 parent_reg = parent->regs; in propagate_liveness()
8930 state_reg = state->regs; in propagate_liveness()
8931 /* We don't need to worry about FP liveness, it's read-only */ in propagate_liveness()
8932 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { in propagate_liveness()
8942 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && in propagate_liveness()
8943 i < parent->allocated_stack / BPF_REG_SIZE; i++) { in propagate_liveness()
8944 parent_reg = &parent->stack[i].spilled_ptr; in propagate_liveness()
8945 state_reg = &state->stack[i].spilled_ptr; in propagate_liveness()
8965 state = old->frame[old->curframe]; in propagate_precision()
8966 state_reg = state->regs; in propagate_precision()
8968 if (state_reg->type != SCALAR_VALUE || in propagate_precision()
8969 !state_reg->precise) in propagate_precision()
8971 if (env->log.level & BPF_LOG_LEVEL2) in propagate_precision()
8978 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in propagate_precision()
8979 if (state->stack[i].slot_type[0] != STACK_SPILL) in propagate_precision()
8981 state_reg = &state->stack[i].spilled_ptr; in propagate_precision()
8982 if (state_reg->type != SCALAR_VALUE || in propagate_precision()
8983 !state_reg->precise) in propagate_precision()
8985 if (env->log.level & BPF_LOG_LEVEL2) in propagate_precision()
8987 (-i - 1) * BPF_REG_SIZE); in propagate_precision()
8999 int i, fr = cur->curframe; in states_maybe_looping()
9001 if (old->curframe != fr) in states_maybe_looping()
9004 fold = old->frame[fr]; in states_maybe_looping()
9005 fcur = cur->frame[fr]; in states_maybe_looping()
9007 if (memcmp(&fold->regs[i], &fcur->regs[i], in states_maybe_looping()
9018 struct bpf_verifier_state *cur = env->cur_state, *new; in is_state_visited()
9020 bool add_new_state = env->test_state_freq ? true : false; in is_state_visited()
9022 cur->last_insn_idx = env->prev_insn_idx; in is_state_visited()
9023 if (!env->insn_aux_data[insn_idx].prune_point) in is_state_visited()
9030 * http://vger.kernel.org/bpfconf2019.html#session-1 in is_state_visited()
9037 if (env->jmps_processed - env->prev_jmps_processed >= 2 && in is_state_visited()
9038 env->insn_processed - env->prev_insn_processed >= 8) in is_state_visited()
9048 if (sl->state.insn_idx != insn_idx) in is_state_visited()
9050 if (sl->state.branches) { in is_state_visited()
9051 if (states_maybe_looping(&sl->state, cur) && in is_state_visited()
9052 states_equal(env, &sl->state, cur)) { in is_state_visited()
9055 return -EINVAL; in is_state_visited()
9064 * if r1 < 1000000 goto pc-2 in is_state_visited()
9069 if (env->jmps_processed - env->prev_jmps_processed < 20 && in is_state_visited()
9070 env->insn_processed - env->prev_insn_processed < 100) in is_state_visited()
9074 if (states_equal(env, &sl->state, cur)) { in is_state_visited()
9075 sl->hit_cnt++; in is_state_visited()
9079 * If we have any write marks in env->cur_state, they in is_state_visited()
9086 err = propagate_liveness(env, &sl->state, cur); in is_state_visited()
9094 err = err ? : propagate_precision(env, &sl->state); in is_state_visited()
9107 sl->miss_cnt++; in is_state_visited()
9113 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) { in is_state_visited()
9117 *pprev = sl->next; in is_state_visited()
9118 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) { in is_state_visited()
9119 u32 br = sl->state.branches; in is_state_visited()
9124 free_verifier_state(&sl->state, false); in is_state_visited()
9126 env->peak_states--; in is_state_visited()
9132 sl->next = env->free_list; in is_state_visited()
9133 env->free_list = sl; in is_state_visited()
9139 pprev = &sl->next; in is_state_visited()
9143 if (env->max_states_per_insn < states_cnt) in is_state_visited()
9144 env->max_states_per_insn = states_cnt; in is_state_visited()
9146 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) in is_state_visited()
9158 * When looping the sl->state.branches will be > 0 and this state in is_state_visited()
9163 return -ENOMEM; in is_state_visited()
9164 env->total_states++; in is_state_visited()
9165 env->peak_states++; in is_state_visited()
9166 env->prev_jmps_processed = env->jmps_processed; in is_state_visited()
9167 env->prev_insn_processed = env->insn_processed; in is_state_visited()
9170 new = &new_sl->state; in is_state_visited()
9177 new->insn_idx = insn_idx; in is_state_visited()
9178 WARN_ONCE(new->branches != 1, in is_state_visited()
9179 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx); in is_state_visited()
9181 cur->parent = new; in is_state_visited()
9182 cur->first_insn_idx = insn_idx; in is_state_visited()
9184 new_sl->next = *explored_state(env, insn_idx); in is_state_visited()
9187 * registers connected. Only r6 - r9 of the callers are alive (pushed in is_state_visited()
9189 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to in is_state_visited()
9199 for (j = 0; j <= cur->curframe; j++) { in is_state_visited()
9200 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) in is_state_visited()
9201 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; in is_state_visited()
9203 cur->frame[j]->regs[i].live = REG_LIVE_NONE; in is_state_visited()
9207 for (j = 0; j <= cur->curframe; j++) { in is_state_visited()
9208 struct bpf_func_state *frame = cur->frame[j]; in is_state_visited()
9209 struct bpf_func_state *newframe = new->frame[j]; in is_state_visited()
9211 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { in is_state_visited()
9212 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; in is_state_visited()
9213 frame->stack[i].spilled_ptr.parent = in is_state_visited()
9214 &newframe->stack[i].spilled_ptr; in is_state_visited()
9260 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); in do_check()
9261 struct bpf_verifier_state *state = env->cur_state; in do_check()
9262 struct bpf_insn *insns = env->prog->insnsi; in do_check()
9264 int insn_cnt = env->prog->len; in do_check()
9266 int prev_insn_idx = -1; in do_check()
9273 env->prev_insn_idx = prev_insn_idx; in do_check()
9274 if (env->insn_idx >= insn_cnt) { in do_check()
9276 env->insn_idx, insn_cnt); in do_check()
9277 return -EFAULT; in do_check()
9280 insn = &insns[env->insn_idx]; in do_check()
9281 class = BPF_CLASS(insn->code); in do_check()
9283 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { in do_check()
9286 env->insn_processed); in do_check()
9287 return -E2BIG; in do_check()
9290 err = is_state_visited(env, env->insn_idx); in do_check()
9295 if (env->log.level & BPF_LOG_LEVEL) { in do_check()
9298 env->prev_insn_idx, env->insn_idx, in do_check()
9299 env->cur_state->speculative ? in do_check()
9302 verbose(env, "%d: safe\n", env->insn_idx); in do_check()
9308 return -EAGAIN; in do_check()
9313 if (env->log.level & BPF_LOG_LEVEL2 || in do_check()
9314 (env->log.level & BPF_LOG_LEVEL && do_print_state)) { in do_check()
9315 if (env->log.level & BPF_LOG_LEVEL2) in do_check()
9316 verbose(env, "%d:", env->insn_idx); in do_check()
9319 env->prev_insn_idx, env->insn_idx, in do_check()
9320 env->cur_state->speculative ? in do_check()
9322 print_verifier_state(env, state->frame[state->curframe]); in do_check()
9326 if (env->log.level & BPF_LOG_LEVEL) { in do_check()
9332 verbose_linfo(env, env->insn_idx, "; "); in do_check()
9333 verbose(env, "%d: ", env->insn_idx); in do_check()
9334 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); in do_check()
9337 if (bpf_prog_is_dev_bound(env->prog->aux)) { in do_check()
9338 err = bpf_prog_offload_verify_insn(env, env->insn_idx, in do_check()
9339 env->prev_insn_idx); in do_check()
9345 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; in do_check()
9346 prev_insn_idx = env->insn_idx; in do_check()
9359 err = check_reg_arg(env, insn->src_reg, SRC_OP); in do_check()
9363 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in do_check()
9367 src_reg_type = regs[insn->src_reg].type; in do_check()
9372 err = check_mem_access(env, env->insn_idx, insn->src_reg, in do_check()
9373 insn->off, BPF_SIZE(insn->code), in do_check()
9374 BPF_READ, insn->dst_reg, false); in do_check()
9378 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type; in do_check()
9396 return -EINVAL; in do_check()
9402 if (BPF_MODE(insn->code) == BPF_XADD) { in do_check()
9403 err = check_xadd(env, env->insn_idx, insn); in do_check()
9406 env->insn_idx++; in do_check()
9411 err = check_reg_arg(env, insn->src_reg, SRC_OP); in do_check()
9415 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in do_check()
9419 dst_reg_type = regs[insn->dst_reg].type; in do_check()
9422 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in do_check()
9423 insn->off, BPF_SIZE(insn->code), in do_check()
9424 BPF_WRITE, insn->src_reg, false); in do_check()
9428 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type; in do_check()
9434 return -EINVAL; in do_check()
9438 if (BPF_MODE(insn->code) != BPF_MEM || in do_check()
9439 insn->src_reg != BPF_REG_0) { in do_check()
9441 return -EINVAL; in do_check()
9444 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in do_check()
9448 if (is_ctx_reg(env, insn->dst_reg)) { in do_check()
9450 insn->dst_reg, in do_check()
9451 reg_type_str[reg_state(env, insn->dst_reg)->type]); in do_check()
9452 return -EACCES; in do_check()
9456 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in do_check()
9457 insn->off, BPF_SIZE(insn->code), in do_check()
9458 BPF_WRITE, -1, false); in do_check()
9463 u8 opcode = BPF_OP(insn->code); in do_check()
9465 env->jmps_processed++; in do_check()
9467 if (BPF_SRC(insn->code) != BPF_K || in do_check()
9468 insn->off != 0 || in do_check()
9469 (insn->src_reg != BPF_REG_0 && in do_check()
9470 insn->src_reg != BPF_PSEUDO_CALL) || in do_check()
9471 insn->dst_reg != BPF_REG_0 || in do_check()
9474 return -EINVAL; in do_check()
9477 if (env->cur_state->active_spin_lock && in do_check()
9478 (insn->src_reg == BPF_PSEUDO_CALL || in do_check()
9479 insn->imm != BPF_FUNC_spin_unlock)) { in do_check()
9481 return -EINVAL; in do_check()
9483 if (insn->src_reg == BPF_PSEUDO_CALL) in do_check()
9484 err = check_func_call(env, insn, &env->insn_idx); in do_check()
9486 err = check_helper_call(env, insn->imm, env->insn_idx); in do_check()
9491 if (BPF_SRC(insn->code) != BPF_K || in do_check()
9492 insn->imm != 0 || in do_check()
9493 insn->src_reg != BPF_REG_0 || in do_check()
9494 insn->dst_reg != BPF_REG_0 || in do_check()
9497 return -EINVAL; in do_check()
9500 env->insn_idx += insn->off + 1; in do_check()
9504 if (BPF_SRC(insn->code) != BPF_K || in do_check()
9505 insn->imm != 0 || in do_check()
9506 insn->src_reg != BPF_REG_0 || in do_check()
9507 insn->dst_reg != BPF_REG_0 || in do_check()
9510 return -EINVAL; in do_check()
9513 if (env->cur_state->active_spin_lock) { in do_check()
9515 return -EINVAL; in do_check()
9518 if (state->curframe) { in do_check()
9520 err = prepare_func_exit(env, &env->insn_idx); in do_check()
9535 update_branch_counts(env, env->cur_state); in do_check()
9537 &env->insn_idx, pop_log); in do_check()
9539 if (err != -ENOENT) in do_check()
9547 err = check_cond_jmp_op(env, insn, &env->insn_idx); in do_check()
9552 u8 mode = BPF_MODE(insn->code); in do_check()
9564 env->insn_idx++; in do_check()
9565 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; in do_check()
9568 return -EINVAL; in do_check()
9572 return -EINVAL; in do_check()
9575 env->insn_idx++; in do_check()
9591 u32 type, id = insn->imm; in check_pseudo_btf_id()
9598 return -EINVAL; in check_pseudo_btf_id()
9603 return -EINVAL; in check_pseudo_btf_id()
9609 return -ENOENT; in check_pseudo_btf_id()
9615 return -EINVAL; in check_pseudo_btf_id()
9618 sym_name = btf_name_by_offset(btf_vmlinux, t->name_off); in check_pseudo_btf_id()
9623 return -ENOENT; in check_pseudo_btf_id()
9631 if (vsi->type == id) { in check_pseudo_btf_id()
9641 type = t->type; in check_pseudo_btf_id()
9644 aux->btf_var.reg_type = PTR_TO_PERCPU_BTF_ID; in check_pseudo_btf_id()
9645 aux->btf_var.btf_id = type; in check_pseudo_btf_id()
9654 tname = btf_name_by_offset(btf_vmlinux, t->name_off); in check_pseudo_btf_id()
9657 return -EINVAL; in check_pseudo_btf_id()
9659 aux->btf_var.reg_type = PTR_TO_MEM; in check_pseudo_btf_id()
9660 aux->btf_var.mem_size = tsize; in check_pseudo_btf_id()
9662 aux->btf_var.reg_type = PTR_TO_BTF_ID; in check_pseudo_btf_id()
9663 aux->btf_var.btf_id = type; in check_pseudo_btf_id()
9670 return (map->map_type != BPF_MAP_TYPE_HASH && in check_map_prealloc()
9671 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && in check_map_prealloc()
9672 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || in check_map_prealloc()
9673 !(map->map_flags & BPF_F_NO_PREALLOC); in check_map_prealloc()
9693 if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta)) in is_preallocated_map()
9715 * On RT enabled kernels run-time allocation of all trace type in check_map_prog_compatibility()
9724 return -EINVAL; in check_map_prog_compatibility()
9728 return -EINVAL; in check_map_prog_compatibility()
9730 WARN_ONCE(1, "trace type BPF program uses run-time allocation\n"); in check_map_prog_compatibility()
9731 …verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to prealloc… in check_map_prog_compatibility()
9738 return -EINVAL; in check_map_prog_compatibility()
9741 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && in check_map_prog_compatibility()
9744 return -EINVAL; in check_map_prog_compatibility()
9747 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in check_map_prog_compatibility()
9749 return -EINVAL; in check_map_prog_compatibility()
9752 if (prog->aux->sleepable) in check_map_prog_compatibility()
9753 switch (map->map_type) { in check_map_prog_compatibility()
9760 return -EINVAL; in check_map_prog_compatibility()
9766 return -EINVAL; in check_map_prog_compatibility()
9774 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || in bpf_map_is_cgroup_storage()
9775 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); in bpf_map_is_cgroup_storage()
9787 struct bpf_insn *insn = env->prog->insnsi; in resolve_pseudo_ldimm64()
9788 int insn_cnt = env->prog->len; in resolve_pseudo_ldimm64()
9791 err = bpf_prog_calc_tag(env->prog); in resolve_pseudo_ldimm64()
9796 if (BPF_CLASS(insn->code) == BPF_LDX && in resolve_pseudo_ldimm64()
9797 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { in resolve_pseudo_ldimm64()
9799 return -EINVAL; in resolve_pseudo_ldimm64()
9802 if (BPF_CLASS(insn->code) == BPF_STX && in resolve_pseudo_ldimm64()
9803 ((BPF_MODE(insn->code) != BPF_MEM && in resolve_pseudo_ldimm64()
9804 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { in resolve_pseudo_ldimm64()
9806 return -EINVAL; in resolve_pseudo_ldimm64()
9815 if (i == insn_cnt - 1 || insn[1].code != 0 || in resolve_pseudo_ldimm64()
9819 return -EINVAL; in resolve_pseudo_ldimm64()
9823 /* valid generic load 64-bit imm */ in resolve_pseudo_ldimm64()
9827 aux = &env->insn_aux_data[i]; in resolve_pseudo_ldimm64()
9835 * converted into regular 64-bit imm load insn. in resolve_pseudo_ldimm64()
9843 return -EINVAL; in resolve_pseudo_ldimm64()
9854 err = check_map_prog_compatibility(env, map, env->prog); in resolve_pseudo_ldimm64()
9860 aux = &env->insn_aux_data[i]; in resolve_pseudo_ldimm64()
9861 if (insn->src_reg == BPF_PSEUDO_MAP_FD) { in resolve_pseudo_ldimm64()
9869 return -EINVAL; in resolve_pseudo_ldimm64()
9872 if (!map->ops->map_direct_value_addr) { in resolve_pseudo_ldimm64()
9875 return -EINVAL; in resolve_pseudo_ldimm64()
9878 err = map->ops->map_direct_value_addr(map, &addr, off); in resolve_pseudo_ldimm64()
9881 map->value_size, off); in resolve_pseudo_ldimm64()
9886 aux->map_off = off; in resolve_pseudo_ldimm64()
9894 for (j = 0; j < env->used_map_cnt; j++) { in resolve_pseudo_ldimm64()
9895 if (env->used_maps[j] == map) { in resolve_pseudo_ldimm64()
9896 aux->map_index = j; in resolve_pseudo_ldimm64()
9902 if (env->used_map_cnt >= MAX_USED_MAPS) { in resolve_pseudo_ldimm64()
9904 return -E2BIG; in resolve_pseudo_ldimm64()
9914 aux->map_index = env->used_map_cnt; in resolve_pseudo_ldimm64()
9915 env->used_maps[env->used_map_cnt++] = map; in resolve_pseudo_ldimm64()
9918 bpf_cgroup_storage_assign(env->prog->aux, map)) { in resolve_pseudo_ldimm64()
9921 return -EBUSY; in resolve_pseudo_ldimm64()
9932 if (!bpf_opcode_in_insntable(insn->code)) { in resolve_pseudo_ldimm64()
9933 verbose(env, "unknown opcode %02x\n", insn->code); in resolve_pseudo_ldimm64()
9934 return -EINVAL; in resolve_pseudo_ldimm64()
9948 __bpf_free_used_maps(env->prog->aux, env->used_maps, in release_maps()
9949 env->used_map_cnt); in release_maps()
9955 struct bpf_insn *insn = env->prog->insnsi; in convert_pseudo_ld_imm64()
9956 int insn_cnt = env->prog->len; in convert_pseudo_ld_imm64()
9960 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) in convert_pseudo_ld_imm64()
9961 insn->src_reg = 0; in convert_pseudo_ld_imm64()
9964 /* single env->prog->insni[off] instruction was replaced with the range
9971 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; in adjust_insn_aux_data()
9972 struct bpf_insn *insn = new_prog->insnsi; in adjust_insn_aux_data()
9980 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); in adjust_insn_aux_data()
9984 prog_len = new_prog->len; in adjust_insn_aux_data()
9988 return -ENOMEM; in adjust_insn_aux_data()
9990 memcpy(new_data + off + cnt - 1, old_data + off, in adjust_insn_aux_data()
9991 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); in adjust_insn_aux_data()
9992 for (i = off; i < off + cnt - 1; i++) { in adjust_insn_aux_data()
9993 new_data[i].seen = env->pass_cnt; in adjust_insn_aux_data()
9996 env->insn_aux_data = new_data; in adjust_insn_aux_data()
10008 for (i = 0; i <= env->subprog_cnt; i++) { in adjust_subprog_starts()
10009 if (env->subprog_info[i].start <= off) in adjust_subprog_starts()
10011 env->subprog_info[i].start += len - 1; in adjust_subprog_starts()
10017 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; in adjust_poke_descs()
10018 int i, sz = prog->aux->size_poke_tab; in adjust_poke_descs()
10023 desc->insn_idx += len - 1; in adjust_poke_descs()
10032 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); in bpf_patch_insn_data()
10034 if (PTR_ERR(new_prog) == -ERANGE) in bpf_patch_insn_data()
10036 "insn %d cannot be patched due to 16-bit range\n", in bpf_patch_insn_data()
10037 env->insn_aux_data[off].orig_idx); in bpf_patch_insn_data()
10053 for (i = 0; i < env->subprog_cnt; i++) in adjust_subprog_starts_after_remove()
10054 if (env->subprog_info[i].start >= off) in adjust_subprog_starts_after_remove()
10057 for (j = i; j < env->subprog_cnt; j++) in adjust_subprog_starts_after_remove()
10058 if (env->subprog_info[j].start >= off + cnt) in adjust_subprog_starts_after_remove()
10063 if (env->subprog_info[j].start != off + cnt) in adjust_subprog_starts_after_remove()
10064 j--; in adjust_subprog_starts_after_remove()
10067 struct bpf_prog_aux *aux = env->prog->aux; in adjust_subprog_starts_after_remove()
10071 move = env->subprog_cnt + 1 - j; in adjust_subprog_starts_after_remove()
10073 memmove(env->subprog_info + i, in adjust_subprog_starts_after_remove()
10074 env->subprog_info + j, in adjust_subprog_starts_after_remove()
10075 sizeof(*env->subprog_info) * move); in adjust_subprog_starts_after_remove()
10076 env->subprog_cnt -= j - i; in adjust_subprog_starts_after_remove()
10079 if (aux->func_info) { in adjust_subprog_starts_after_remove()
10080 move = aux->func_info_cnt - j; in adjust_subprog_starts_after_remove()
10082 memmove(aux->func_info + i, in adjust_subprog_starts_after_remove()
10083 aux->func_info + j, in adjust_subprog_starts_after_remove()
10084 sizeof(*aux->func_info) * move); in adjust_subprog_starts_after_remove()
10085 aux->func_info_cnt -= j - i; in adjust_subprog_starts_after_remove()
10086 /* func_info->insn_off is set after all code rewrites, in adjust_subprog_starts_after_remove()
10087 * in adjust_btf_func() - no need to adjust in adjust_subprog_starts_after_remove()
10092 if (env->subprog_info[i].start == off) in adjust_subprog_starts_after_remove()
10097 for (; i <= env->subprog_cnt; i++) in adjust_subprog_starts_after_remove()
10098 env->subprog_info[i].start -= cnt; in adjust_subprog_starts_after_remove()
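/* In short: removing the range [off, off + cnt) deletes the table entries
 * (and matching func_info) of subprograms that were wholly removed, and
 * shifts the start of every surviving subprogram after the hole down by
 * cnt so the table keeps matching the shortened instruction stream.
 */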
10106 struct bpf_prog *prog = env->prog; in bpf_adj_linfo_after_remove()
10110 nr_linfo = prog->aux->nr_linfo; in bpf_adj_linfo_after_remove()
10114 linfo = prog->aux->linfo; in bpf_adj_linfo_after_remove()
10130 * last removed linfo. prog is already modified, so prog->len == off in bpf_adj_linfo_after_remove()
10133 if (prog->len != off && l_cnt && in bpf_adj_linfo_after_remove()
10135 l_cnt--; in bpf_adj_linfo_after_remove()
10136 linfo[--i].insn_off = off + cnt; in bpf_adj_linfo_after_remove()
10142 sizeof(*linfo) * (nr_linfo - i)); in bpf_adj_linfo_after_remove()
10144 prog->aux->nr_linfo -= l_cnt; in bpf_adj_linfo_after_remove()
10145 nr_linfo = prog->aux->nr_linfo; in bpf_adj_linfo_after_remove()
10150 linfo[i].insn_off -= cnt; in bpf_adj_linfo_after_remove()
10153 for (i = 0; i <= env->subprog_cnt; i++) in bpf_adj_linfo_after_remove()
10154 if (env->subprog_info[i].linfo_idx > l_off) { in bpf_adj_linfo_after_remove()
10158 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) in bpf_adj_linfo_after_remove()
10159 env->subprog_info[i].linfo_idx -= l_cnt; in bpf_adj_linfo_after_remove()
10161 env->subprog_info[i].linfo_idx = l_off; in bpf_adj_linfo_after_remove()
10169 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in verifier_remove_insns()
10170 unsigned int orig_prog_len = env->prog->len; in verifier_remove_insns()
10173 if (bpf_prog_is_dev_bound(env->prog->aux)) in verifier_remove_insns()
10176 err = bpf_remove_insns(env->prog, off, cnt); in verifier_remove_insns()
10189 sizeof(*aux_data) * (orig_prog_len - off - cnt)); in verifier_remove_insns()
10196 * have dead code too. Therefore, replace all code that is dead at
10197 * run time with 'ja -1'.
10207 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in sanitize_dead_code()
10208 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); in sanitize_dead_code()
10209 struct bpf_insn *insn = env->prog->insnsi; in sanitize_dead_code()
10210 const int insn_cnt = env->prog->len; in sanitize_dead_code()
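/* 'ja -1' is an unconditional jump with offset -1, i.e. an instruction
 * that branches to itself.  It can never execute (the verifier just
 * proved these instructions unreachable), and overwriting dead insns in
 * place keeps all jump offsets and line info valid without renumbering.
 */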
10236 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in opt_hard_wire_dead_code_branches()
10238 struct bpf_insn *insn = env->prog->insnsi; in opt_hard_wire_dead_code_branches()
10239 const int insn_cnt = env->prog->len; in opt_hard_wire_dead_code_branches()
10243 if (!insn_is_cond_jump(insn->code)) in opt_hard_wire_dead_code_branches()
10247 ja.off = insn->off; in opt_hard_wire_dead_code_branches()
10248 else if (!aux_data[i + 1 + insn->off].seen) in opt_hard_wire_dead_code_branches()
10253 if (bpf_prog_is_dev_bound(env->prog->aux)) in opt_hard_wire_dead_code_branches()
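/* The rewrite above turns a conditional jump with one dead arm into an
 * unconditional 'ja': if the fall-through insn was never visited, jump
 * straight to the taken target (ja.off = insn->off); if the taken target
 * was never visited, fall through (ja.off = 0); jumps with two live arms
 * are left alone.
 */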
10262 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in opt_remove_dead_code()
10263 int insn_cnt = env->prog->len; in opt_remove_dead_code()
10278 insn_cnt = env->prog->len; in opt_remove_dead_code()
10287 struct bpf_insn *insn = env->prog->insnsi; in opt_remove_nops()
10288 int insn_cnt = env->prog->len; in opt_remove_nops()
10298 insn_cnt--; in opt_remove_nops()
10299 i--; in opt_remove_nops()
10309 struct bpf_insn_aux_data *aux = env->insn_aux_data; in opt_subreg_zext_lo32_rnd_hi32()
10310 int i, patch_len, delta = 0, len = env->prog->len; in opt_subreg_zext_lo32_rnd_hi32()
10311 struct bpf_insn *insns = env->prog->insnsi; in opt_subreg_zext_lo32_rnd_hi32()
10315 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; in opt_subreg_zext_lo32_rnd_hi32()
10337 /* NOTE: arg "reg" (the fourth one) is only used for in opt_subreg_zext_lo32_rnd_hi32()
10373 return -ENOMEM; in opt_subreg_zext_lo32_rnd_hi32()
10374 env->prog = new_prog; in opt_subreg_zext_lo32_rnd_hi32()
10375 insns = new_prog->insnsi; in opt_subreg_zext_lo32_rnd_hi32()
10376 aux = env->insn_aux_data; in opt_subreg_zext_lo32_rnd_hi32()
10377 delta += patch_len - 1; in opt_subreg_zext_lo32_rnd_hi32()
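/* A minimal illustrative sketch (not from the file itself) of the patch
 * this pass emits on JITs that do not implicitly zero the upper 32 bits
 * of a sub-register write: a 32-bit definition whose high bits matter
 * (aux->zext_dst) is followed by an explicit zero extension.  Register
 * numbers here are arbitrary.
 */
static const struct bpf_insn zext_patch_sketch[] = {
	BPF_MOV32_REG(BPF_REG_3, BPF_REG_4),	/* original 32-bit def */
	BPF_ZEXT_REG(BPF_REG_3),		/* zext added by the pass */
};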
10385 * struct __sk_buff -> struct sk_buff
10386 * struct bpf_sock_ops -> struct sock
10390 const struct bpf_verifier_ops *ops = env->ops; in convert_ctx_accesses()
10392 const int insn_cnt = env->prog->len; in convert_ctx_accesses()
10399 if (ops->gen_prologue || env->seen_direct_write) { in convert_ctx_accesses()
10400 if (!ops->gen_prologue) { in convert_ctx_accesses()
10402 return -EINVAL; in convert_ctx_accesses()
10404 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, in convert_ctx_accesses()
10405 env->prog); in convert_ctx_accesses()
10408 return -EINVAL; in convert_ctx_accesses()
10412 return -ENOMEM; in convert_ctx_accesses()
10414 env->prog = new_prog; in convert_ctx_accesses()
10415 delta += cnt - 1; in convert_ctx_accesses()
10419 if (bpf_prog_is_dev_bound(env->prog->aux)) in convert_ctx_accesses()
10422 insn = env->prog->insnsi + delta; in convert_ctx_accesses()
10427 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || in convert_ctx_accesses()
10428 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || in convert_ctx_accesses()
10429 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || in convert_ctx_accesses()
10430 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) in convert_ctx_accesses()
10432 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || in convert_ctx_accesses()
10433 insn->code == (BPF_STX | BPF_MEM | BPF_H) || in convert_ctx_accesses()
10434 insn->code == (BPF_STX | BPF_MEM | BPF_W) || in convert_ctx_accesses()
10435 insn->code == (BPF_STX | BPF_MEM | BPF_DW)) in convert_ctx_accesses()
10441 env->insn_aux_data[i + delta].sanitize_stack_off) { in convert_ctx_accesses()
10449 env->insn_aux_data[i + delta].sanitize_stack_off, in convert_ctx_accesses()
10460 return -ENOMEM; in convert_ctx_accesses()
10462 delta += cnt - 1; in convert_ctx_accesses()
10463 env->prog = new_prog; in convert_ctx_accesses()
10464 insn = new_prog->insnsi + i + delta; in convert_ctx_accesses()
10468 switch (env->insn_aux_data[i + delta].ptr_type) { in convert_ctx_accesses()
10470 if (!ops->convert_ctx_access) in convert_ctx_accesses()
10472 convert_ctx_access = ops->convert_ctx_access; in convert_ctx_accesses()
10486 insn->code = BPF_LDX | BPF_PROBE_MEM | in convert_ctx_accesses()
10487 BPF_SIZE((insn)->code); in convert_ctx_accesses()
10488 env->prog->aux->num_exentries++; in convert_ctx_accesses()
10489 } else if (resolve_prog_type(env->prog) != BPF_PROG_TYPE_STRUCT_OPS) { in convert_ctx_accesses()
10491 return -EINVAL; in convert_ctx_accesses()
10498 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; in convert_ctx_accesses()
10502 * convert to a 4/8-byte load, using the minimum program-type-specific in convert_ctx_accesses()
10508 off = insn->off; in convert_ctx_accesses()
10514 return -EINVAL; in convert_ctx_accesses()
10523 insn->off = off & ~(size_default - 1); in convert_ctx_accesses()
10524 insn->code = BPF_LDX | BPF_MEM | size_code; in convert_ctx_accesses()
10528 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, in convert_ctx_accesses()
10533 return -EINVAL; in convert_ctx_accesses()
10542 insn->dst_reg, in convert_ctx_accesses()
10544 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, in convert_ctx_accesses()
10545 (1 << size * 8) - 1); in convert_ctx_accesses()
10549 insn->dst_reg, in convert_ctx_accesses()
10551 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, in convert_ctx_accesses()
10552 (1ULL << size * 8) - 1); in convert_ctx_accesses()
10558 return -ENOMEM; in convert_ctx_accesses()
10560 delta += cnt - 1; in convert_ctx_accesses()
10563 env->prog = new_prog; in convert_ctx_accesses()
10564 insn = new_prog->insnsi + i + delta; in convert_ctx_accesses()
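/* A concrete example of the narrow-load handling above, with made-up
 * numbers: a 1-byte read at offset 3 of a 4-byte context field has
 * insn->off rounded down to the field start (off & ~3), the load widened
 * to the full 4 bytes, and the shift/mask sequence emitted above then
 * extracts just the requested byte from the loaded word.
 */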
10572 struct bpf_prog *prog = env->prog, **func, *tmp; in jit_subprogs()
10579 if (env->subprog_cnt <= 1) in jit_subprogs()
10582 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
10583 if (insn->code != (BPF_JMP | BPF_CALL) || in jit_subprogs()
10584 insn->src_reg != BPF_PSEUDO_CALL) in jit_subprogs()
10587 * need a hard reject of the program. Thus -EFAULT is in jit_subprogs()
10590 subprog = find_subprog(env, i + insn->imm + 1); in jit_subprogs()
10593 i + insn->imm + 1); in jit_subprogs()
10594 return -EFAULT; in jit_subprogs()
10599 insn->off = subprog; in jit_subprogs()
10603 env->insn_aux_data[i].call_imm = insn->imm; in jit_subprogs()
10605 insn->imm = 1; in jit_subprogs()
10612 err = -ENOMEM; in jit_subprogs()
10613 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); in jit_subprogs()
10617 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
10619 subprog_end = env->subprog_info[i + 1].start; in jit_subprogs()
10621 len = subprog_end - subprog_start; in jit_subprogs()
10625 * func[i]->aux->stats will never be accessed and stays NULL in jit_subprogs()
10630 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], in jit_subprogs()
10632 func[i]->type = prog->type; in jit_subprogs()
10633 func[i]->len = len; in jit_subprogs()
10636 func[i]->is_func = 1; in jit_subprogs()
10637 func[i]->aux->func_idx = i; in jit_subprogs()
10638 /* the btf and func_info will be freed only at prog->aux */ in jit_subprogs()
10639 func[i]->aux->btf = prog->aux->btf; in jit_subprogs()
10640 func[i]->aux->func_info = prog->aux->func_info; in jit_subprogs()
10642 for (j = 0; j < prog->aux->size_poke_tab; j++) { in jit_subprogs()
10643 u32 insn_idx = prog->aux->poke_tab[j].insn_idx; in jit_subprogs()
10651 &prog->aux->poke_tab[j]); in jit_subprogs()
10657 func[i]->insnsi[insn_idx - subprog_start].imm = ret + 1; in jit_subprogs()
10659 map_ptr = func[i]->aux->poke_tab[ret].tail_call.map; in jit_subprogs()
10660 ret = map_ptr->ops->map_poke_track(map_ptr, func[i]->aux); in jit_subprogs()
10668 * Long term, this would need debug info to populate names in jit_subprogs()
10670 func[i]->aux->name[0] = 'F'; in jit_subprogs()
10671 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; in jit_subprogs()
10672 func[i]->jit_requested = 1; in jit_subprogs()
10673 func[i]->aux->linfo = prog->aux->linfo; in jit_subprogs()
10674 func[i]->aux->nr_linfo = prog->aux->nr_linfo; in jit_subprogs()
10675 func[i]->aux->jited_linfo = prog->aux->jited_linfo; in jit_subprogs()
10676 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; in jit_subprogs()
10678 insn = func[i]->insnsi; in jit_subprogs()
10679 for (j = 0; j < func[i]->len; j++, insn++) { in jit_subprogs()
10680 if (BPF_CLASS(insn->code) == BPF_LDX && in jit_subprogs()
10681 BPF_MODE(insn->code) == BPF_PROBE_MEM) in jit_subprogs()
10684 func[i]->aux->num_exentries = num_exentries; in jit_subprogs()
10685 func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable; in jit_subprogs()
10687 if (!func[i]->jited) { in jit_subprogs()
10688 err = -ENOTSUPP; in jit_subprogs()
10700 for (i = 0; i < prog->aux->size_poke_tab; i++) { in jit_subprogs()
10701 map_ptr = prog->aux->poke_tab[i].tail_call.map; in jit_subprogs()
10703 map_ptr->ops->map_poke_untrack(map_ptr, prog->aux); in jit_subprogs()
10710 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
10711 insn = func[i]->insnsi; in jit_subprogs()
10712 for (j = 0; j < func[i]->len; j++, insn++) { in jit_subprogs()
10713 if (insn->code != (BPF_JMP | BPF_CALL) || in jit_subprogs()
10714 insn->src_reg != BPF_PSEUDO_CALL) in jit_subprogs()
10716 subprog = insn->off; in jit_subprogs()
10717 insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) - in jit_subprogs()
10732 func[i]->aux->func = func; in jit_subprogs()
10733 func[i]->aux->func_cnt = env->subprog_cnt; in jit_subprogs()
10735 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
10736 old_bpf_func = func[i]->bpf_func; in jit_subprogs()
10738 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { in jit_subprogs()
10739 verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); in jit_subprogs()
10740 err = -ENOTSUPP; in jit_subprogs()
10749 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
10758 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
10759 if (insn->code != (BPF_JMP | BPF_CALL) || in jit_subprogs()
10760 insn->src_reg != BPF_PSEUDO_CALL) in jit_subprogs()
10762 insn->off = env->insn_aux_data[i].call_imm; in jit_subprogs()
10763 subprog = find_subprog(env, i + insn->off + 1); in jit_subprogs()
10764 insn->imm = subprog; in jit_subprogs()
10767 prog->jited = 1; in jit_subprogs()
10768 prog->bpf_func = func[0]->bpf_func; in jit_subprogs()
10769 prog->aux->func = func; in jit_subprogs()
10770 prog->aux->func_cnt = env->subprog_cnt; in jit_subprogs()
10774 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
10778 for (j = 0; j < func[i]->aux->size_poke_tab; j++) { in jit_subprogs()
10779 map_ptr = func[i]->aux->poke_tab[j].tail_call.map; in jit_subprogs()
10780 map_ptr->ops->map_poke_untrack(map_ptr, func[i]->aux); in jit_subprogs()
10787 prog->jit_requested = 0; in jit_subprogs()
10788 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
10789 if (insn->code != (BPF_JMP | BPF_CALL) || in jit_subprogs()
10790 insn->src_reg != BPF_PSEUDO_CALL) in jit_subprogs()
10792 insn->off = 0; in jit_subprogs()
10793 insn->imm = env->insn_aux_data[i].call_imm; in jit_subprogs()
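/* jit_subprogs() fixes up bpf-to-bpf calls in two steps: while splitting
 * the program it stashes each call's original imm in
 * insn_aux_data[].call_imm and temporarily keeps the callee subprog index
 * in insn->off; once every subprogram is JITed, the imm is rewritten to
 * the callee's bpf_func address relative to __bpf_call_base so the JIT
 * emits a direct call.  The cleanup path above undoes this (off = 0, imm
 * restored from call_imm) and clears jit_requested so the interpreter
 * fallback in fixup_call_args() still works.
 */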
10802 struct bpf_prog *prog = env->prog; in fixup_call_args()
10803 struct bpf_insn *insn = prog->insnsi; in fixup_call_args()
10808 if (env->prog->jit_requested && in fixup_call_args()
10809 !bpf_prog_is_dev_bound(env->prog->aux)) { in fixup_call_args()
10813 if (err == -EFAULT) in fixup_call_args()
10817 if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) { in fixup_call_args()
10821 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); in fixup_call_args()
10822 return -EINVAL; in fixup_call_args()
10824 for (i = 0; i < prog->len; i++, insn++) { in fixup_call_args()
10825 if (insn->code != (BPF_JMP | BPF_CALL) || in fixup_call_args()
10826 insn->src_reg != BPF_PSEUDO_CALL) in fixup_call_args()
10838 /* fixup insn->imm field of bpf_call instructions
10845 struct bpf_prog *prog = env->prog; in fixup_bpf_calls()
10847 struct bpf_insn *insn = prog->insnsi; in fixup_bpf_calls()
10849 const int insn_cnt = prog->len; in fixup_bpf_calls()
10858 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || in fixup_bpf_calls()
10859 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || in fixup_bpf_calls()
10860 insn->code == (BPF_ALU | BPF_MOD | BPF_X) || in fixup_bpf_calls()
10861 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { in fixup_bpf_calls()
10862 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; in fixup_bpf_calls()
10864 BPF_MOV32_REG(insn->src_reg, insn->src_reg), in fixup_bpf_calls()
10865 /* Rx div 0 -> 0 */ in fixup_bpf_calls()
10866 BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2), in fixup_bpf_calls()
10867 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), in fixup_bpf_calls()
10872 BPF_MOV32_REG(insn->src_reg, insn->src_reg), in fixup_bpf_calls()
10873 /* Rx mod 0 -> Rx */ in fixup_bpf_calls()
10874 BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1), in fixup_bpf_calls()
10879 if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || in fixup_bpf_calls()
10880 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { in fixup_bpf_calls()
10882 cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0); in fixup_bpf_calls()
10885 cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0); in fixup_bpf_calls()
10890 return -ENOMEM; in fixup_bpf_calls()
10892 delta += cnt - 1; in fixup_bpf_calls()
10893 env->prog = prog = new_prog; in fixup_bpf_calls()
10894 insn = new_prog->insnsi + i + delta; in fixup_bpf_calls()
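/* The patch above gives division by zero the BPF-defined result (0 for
 * div, dividend unchanged for mod) instead of trapping.  The leading
 * BPF_MOV32_REG of the source register onto itself truncates it to 32
 * bits so that, for 32-bit ALU ops, the zero test and the operation see
 * the same value; for 64-bit ops that insn is skipped, which is what the
 * "- (is64 ? 1 : 0)" adjustment of cnt does.
 */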
10898 if (BPF_CLASS(insn->code) == BPF_LD && in fixup_bpf_calls()
10899 (BPF_MODE(insn->code) == BPF_ABS || in fixup_bpf_calls()
10900 BPF_MODE(insn->code) == BPF_IND)) { in fixup_bpf_calls()
10901 cnt = env->ops->gen_ld_abs(insn, insn_buf); in fixup_bpf_calls()
10904 return -EINVAL; in fixup_bpf_calls()
10909 return -ENOMEM; in fixup_bpf_calls()
10911 delta += cnt - 1; in fixup_bpf_calls()
10912 env->prog = prog = new_prog; in fixup_bpf_calls()
10913 insn = new_prog->insnsi + i + delta; in fixup_bpf_calls()
10917 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || in fixup_bpf_calls()
10918 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { in fixup_bpf_calls()
10926 aux = &env->insn_aux_data[i + delta]; in fixup_bpf_calls()
10927 if (!aux->alu_state || in fixup_bpf_calls()
10928 aux->alu_state == BPF_ALU_NON_POINTER) in fixup_bpf_calls()
10931 isneg = aux->alu_state & BPF_ALU_NEG_VALUE; in fixup_bpf_calls()
10932 issrc = (aux->alu_state & BPF_ALU_SANITIZE) == in fixup_bpf_calls()
10935 off_reg = issrc ? insn->src_reg : insn->dst_reg; in fixup_bpf_calls()
10937 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); in fixup_bpf_calls()
10938 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1); in fixup_bpf_calls()
10946 insn->src_reg = BPF_REG_AX; in fixup_bpf_calls()
10952 insn->code = insn->code == code_add ? in fixup_bpf_calls()
10956 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); in fixup_bpf_calls()
10957 cnt = patch - insn_buf; in fixup_bpf_calls()
10961 return -ENOMEM; in fixup_bpf_calls()
10963 delta += cnt - 1; in fixup_bpf_calls()
10964 env->prog = prog = new_prog; in fixup_bpf_calls()
10965 insn = new_prog->insnsi + i + delta; in fixup_bpf_calls()
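/* This block is the Spectre v1 mitigation for pointer arithmetic: for an
 * ADD/SUB the verifier flagged in aux->alu_state, the variable offset
 * register is masked against aux->alu_limit using the scratch register
 * BPF_REG_AX, so even under misspeculation the resulting pointer cannot
 * step outside the object the verifier assumed.  When the value was
 * negated (BPF_ALU_NEG_VALUE), the offset is multiplied by -1 before and
 * after the masking, as seen above.
 */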
10969 if (insn->code != (BPF_JMP | BPF_CALL)) in fixup_bpf_calls()
10971 if (insn->src_reg == BPF_PSEUDO_CALL) in fixup_bpf_calls()
10974 if (insn->imm == BPF_FUNC_get_route_realm) in fixup_bpf_calls()
10975 prog->dst_needed = 1; in fixup_bpf_calls()
10976 if (insn->imm == BPF_FUNC_get_prandom_u32) in fixup_bpf_calls()
10978 if (insn->imm == BPF_FUNC_override_return) in fixup_bpf_calls()
10979 prog->kprobe_override = 1; in fixup_bpf_calls()
10980 if (insn->imm == BPF_FUNC_tail_call) { in fixup_bpf_calls()
10986 prog->cb_access = 1; in fixup_bpf_calls()
10988 prog->aux->stack_depth = MAX_BPF_STACK; in fixup_bpf_calls()
10989 prog->aux->max_pkt_offset = MAX_PACKET_OFF; in fixup_bpf_calls()
10996 insn->imm = 0; in fixup_bpf_calls()
10997 insn->code = BPF_JMP | BPF_TAIL_CALL; in fixup_bpf_calls()
10999 aux = &env->insn_aux_data[i + delta]; in fixup_bpf_calls()
11000 if (env->bpf_capable && !expect_blinding && in fixup_bpf_calls()
11001 prog->jit_requested && in fixup_bpf_calls()
11007 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state), in fixup_bpf_calls()
11018 insn->imm = ret + 1; in fixup_bpf_calls()
11028 * index &= array->index_mask; in fixup_bpf_calls()
11029 * to avoid out-of-bounds cpu speculation in fixup_bpf_calls()
11033 return -EINVAL; in fixup_bpf_calls()
11036 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); in fixup_bpf_calls()
11038 map_ptr->max_entries, 2); in fixup_bpf_calls()
11042 map)->index_mask); in fixup_bpf_calls()
11047 return -ENOMEM; in fixup_bpf_calls()
11049 delta += cnt - 1; in fixup_bpf_calls()
11050 env->prog = prog = new_prog; in fixup_bpf_calls()
11051 insn = new_prog->insnsi + i + delta; in fixup_bpf_calls()
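/* The patch above guards the tail call's index register (R3): an index
 * >= max_entries skips the masking AND together with the tail call itself
 * (the same observable behaviour as bpf_tail_call() failing), while an
 * in-range index is ANDed with the array's power-of-two index_mask so a
 * mispredicted bounds check cannot let speculative execution read past
 * the map's entries.
 */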
11059 if (prog->jit_requested && BITS_PER_LONG == 64 && in fixup_bpf_calls()
11060 (insn->imm == BPF_FUNC_map_lookup_elem || in fixup_bpf_calls()
11061 insn->imm == BPF_FUNC_map_update_elem || in fixup_bpf_calls()
11062 insn->imm == BPF_FUNC_map_delete_elem || in fixup_bpf_calls()
11063 insn->imm == BPF_FUNC_map_push_elem || in fixup_bpf_calls()
11064 insn->imm == BPF_FUNC_map_pop_elem || in fixup_bpf_calls()
11065 insn->imm == BPF_FUNC_map_peek_elem)) { in fixup_bpf_calls()
11066 aux = &env->insn_aux_data[i + delta]; in fixup_bpf_calls()
11070 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); in fixup_bpf_calls()
11071 ops = map_ptr->ops; in fixup_bpf_calls()
11072 if (insn->imm == BPF_FUNC_map_lookup_elem && in fixup_bpf_calls()
11073 ops->map_gen_lookup) { in fixup_bpf_calls()
11074 cnt = ops->map_gen_lookup(map_ptr, insn_buf); in fixup_bpf_calls()
11075 if (cnt == -EOPNOTSUPP) in fixup_bpf_calls()
11079 return -EINVAL; in fixup_bpf_calls()
11085 return -ENOMEM; in fixup_bpf_calls()
11087 delta += cnt - 1; in fixup_bpf_calls()
11088 env->prog = prog = new_prog; in fixup_bpf_calls()
11089 insn = new_prog->insnsi + i + delta; in fixup_bpf_calls()
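/* When the map type cannot inline the lookup (no ->map_gen_lookup, or it
 * returns -EOPNOTSUPP), the switch below patches insn->imm to point
 * directly at the map's own ops->map_*_elem callback, relative to
 * __bpf_call_base, bypassing the generic bpf_map_*_elem wrapper and its
 * indirect call.  An ordinary helper call is instead resolved at the end
 * of this function via fn->func - __bpf_call_base.
 */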
11093 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, in fixup_bpf_calls()
11095 BUILD_BUG_ON(!__same_type(ops->map_delete_elem, in fixup_bpf_calls()
11097 BUILD_BUG_ON(!__same_type(ops->map_update_elem, in fixup_bpf_calls()
11100 BUILD_BUG_ON(!__same_type(ops->map_push_elem, in fixup_bpf_calls()
11103 BUILD_BUG_ON(!__same_type(ops->map_pop_elem, in fixup_bpf_calls()
11105 BUILD_BUG_ON(!__same_type(ops->map_peek_elem, in fixup_bpf_calls()
11108 switch (insn->imm) { in fixup_bpf_calls()
11110 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - in fixup_bpf_calls()
11114 insn->imm = BPF_CAST_CALL(ops->map_update_elem) - in fixup_bpf_calls()
11118 insn->imm = BPF_CAST_CALL(ops->map_delete_elem) - in fixup_bpf_calls()
11122 insn->imm = BPF_CAST_CALL(ops->map_push_elem) - in fixup_bpf_calls()
11126 insn->imm = BPF_CAST_CALL(ops->map_pop_elem) - in fixup_bpf_calls()
11130 insn->imm = BPF_CAST_CALL(ops->map_peek_elem) - in fixup_bpf_calls()
11138 if (prog->jit_requested && BITS_PER_LONG == 64 && in fixup_bpf_calls()
11139 insn->imm == BPF_FUNC_jiffies64) { in fixup_bpf_calls()
11154 return -ENOMEM; in fixup_bpf_calls()
11156 delta += cnt - 1; in fixup_bpf_calls()
11157 env->prog = prog = new_prog; in fixup_bpf_calls()
11158 insn = new_prog->insnsi + i + delta; in fixup_bpf_calls()
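/* On 64-bit kernels the bpf_jiffies64() helper call is replaced by an
 * inline load: in essence, a ld_imm64 of the address of jiffies followed
 * by an 8-byte BPF_LDX into R0, removing the helper call entirely.
 */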
11163 fn = env->ops->get_func_proto(insn->imm, env->prog); in fixup_bpf_calls()
11165 * programs to call them, must be real in-kernel functions in fixup_bpf_calls()
11167 if (!fn->func) { in fixup_bpf_calls()
11170 func_id_name(insn->imm), insn->imm); in fixup_bpf_calls()
11171 return -EFAULT; in fixup_bpf_calls()
11173 insn->imm = fn->func - __bpf_call_base; in fixup_bpf_calls()
11177 for (i = 0; i < prog->aux->size_poke_tab; i++) { in fixup_bpf_calls()
11178 map_ptr = prog->aux->poke_tab[i].tail_call.map; in fixup_bpf_calls()
11179 if (!map_ptr->ops->map_poke_track || in fixup_bpf_calls()
11180 !map_ptr->ops->map_poke_untrack || in fixup_bpf_calls()
11181 !map_ptr->ops->map_poke_run) { in fixup_bpf_calls()
11183 return -EINVAL; in fixup_bpf_calls()
11186 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); in fixup_bpf_calls()
11201 sl = env->free_list; in free_states()
11203 sln = sl->next; in free_states()
11204 free_verifier_state(&sl->state, false); in free_states()
11208 env->free_list = NULL; in free_states()
11210 if (!env->explored_states) in free_states()
11214 sl = env->explored_states[i]; in free_states()
11217 sln = sl->next; in free_states()
11218 free_verifier_state(&sl->state, false); in free_states()
11222 env->explored_states[i] = NULL; in free_states()
11232 * do_check_common() was run and insn->aux->seen tells the pass number
11239 struct bpf_insn *insn = env->prog->insnsi; in sanitize_insn_aux_data()
11243 for (i = 0; i < env->prog->len; i++) { in sanitize_insn_aux_data()
11247 aux = &env->insn_aux_data[i]; in sanitize_insn_aux_data()
11248 if (aux->seen != env->pass_cnt) in sanitize_insn_aux_data()
11256 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); in do_check_common()
11261 env->prev_linfo = NULL; in do_check_common()
11262 env->pass_cnt++; in do_check_common()
11266 return -ENOMEM; in do_check_common()
11267 state->curframe = 0; in do_check_common()
11268 state->speculative = false; in do_check_common()
11269 state->branches = 1; in do_check_common()
11270 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); in do_check_common()
11271 if (!state->frame[0]) { in do_check_common()
11273 return -ENOMEM; in do_check_common()
11275 env->cur_state = state; in do_check_common()
11276 init_func_state(env, state->frame[0], in do_check_common()
11281 regs = state->frame[state->curframe]->regs; in do_check_common()
11282 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { in do_check_common()
11297 if (ret == -EFAULT) in do_check_common()
11315 if (env->cur_state) { in do_check_common()
11316 free_verifier_state(env->cur_state, true); in do_check_common()
11317 env->cur_state = NULL; in do_check_common()
11321 bpf_vlog_reset(&env->log, 0); in do_check_common()
11348 struct bpf_prog_aux *aux = env->prog->aux; in do_check_subprogs()
11351 if (!aux->func_info) in do_check_subprogs()
11354 for (i = 1; i < env->subprog_cnt; i++) { in do_check_subprogs()
11355 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL) in do_check_subprogs()
11357 env->insn_idx = env->subprog_info[i].start; in do_check_subprogs()
11358 WARN_ON_ONCE(env->insn_idx == 0); in do_check_subprogs()
11362 } else if (env->log.level & BPF_LOG_LEVEL) { in do_check_subprogs()
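/* Only subprograms whose BTF linkage is BTF_FUNC_GLOBAL are verified
 * here, each independently of its callers with argument types taken from
 * BTF; static subprograms are still verified inline from every call site
 * during the main do_check_main() walk below.
 */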
11375 env->insn_idx = 0; in do_check_main()
11378 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; in do_check_main()
11387 if (env->log.level & BPF_LOG_STATS) { in print_verification_stats()
11389 div_u64(env->verification_time, 1000)); in print_verification_stats()
11391 for (i = 0; i < env->subprog_cnt; i++) { in print_verification_stats()
11392 u32 depth = env->subprog_info[i].stack_depth; in print_verification_stats()
11395 if (i + 1 < env->subprog_cnt) in print_verification_stats()
11402 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, in print_verification_stats()
11403 env->max_states_per_insn, env->total_states, in print_verification_stats()
11404 env->peak_states, env->longest_mark_read_walk); in print_verification_stats()
11412 struct bpf_prog *prog = env->prog; in check_struct_ops_btf_id()
11416 btf_id = prog->aux->attach_btf_id; in check_struct_ops_btf_id()
11421 return -ENOTSUPP; in check_struct_ops_btf_id()
11424 t = st_ops->type; in check_struct_ops_btf_id()
11425 member_idx = prog->expected_attach_type; in check_struct_ops_btf_id()
11428 member_idx, st_ops->name); in check_struct_ops_btf_id()
11429 return -EINVAL; in check_struct_ops_btf_id()
11433 mname = btf_name_by_offset(btf_vmlinux, member->name_off); in check_struct_ops_btf_id()
11434 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type, in check_struct_ops_btf_id()
11438 mname, member_idx, st_ops->name); in check_struct_ops_btf_id()
11439 return -EINVAL; in check_struct_ops_btf_id()
11442 if (st_ops->check_member) { in check_struct_ops_btf_id()
11443 int err = st_ops->check_member(t, member); in check_struct_ops_btf_id()
11447 mname, st_ops->name); in check_struct_ops_btf_id()
11452 prog->aux->attach_func_proto = func_proto; in check_struct_ops_btf_id()
11453 prog->aux->attach_func_name = mname; in check_struct_ops_btf_id()
11454 env->ops = st_ops->verifier_ops; in check_struct_ops_btf_id()
11463 !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1)) in check_attach_modify_return()
11466 return -EINVAL; in check_attach_modify_return()
11483 /* list of non-sleepable functions that are otherwise on
11487 /* Three functions below can be called from sleepable and non-sleepable context.
11488 * Assume non-sleepable from the BPF safety point of view.
11506 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; in bpf_check_attach_target()
11508 int ret = 0, subprog = -1, i; in bpf_check_attach_target()
11517 return -EINVAL; in bpf_check_attach_target()
11519 btf = tgt_prog ? tgt_prog->aux->btf : btf_vmlinux; in bpf_check_attach_target()
11523 return -EINVAL; in bpf_check_attach_target()
11528 return -EINVAL; in bpf_check_attach_target()
11530 tname = btf_name_by_offset(btf, t->name_off); in bpf_check_attach_target()
11533 return -EINVAL; in bpf_check_attach_target()
11536 struct bpf_prog_aux *aux = tgt_prog->aux; in bpf_check_attach_target()
11538 for (i = 0; i < aux->func_info_cnt; i++) in bpf_check_attach_target()
11539 if (aux->func_info[i].type_id == btf_id) { in bpf_check_attach_target()
11543 if (subprog == -1) { in bpf_check_attach_target()
11545 return -EINVAL; in bpf_check_attach_target()
11547 conservative = aux->func_info_aux[subprog].unreliable; in bpf_check_attach_target()
11552 return -EINVAL; in bpf_check_attach_target()
11554 if (!prog->jit_requested) { in bpf_check_attach_target()
11557 return -EINVAL; in bpf_check_attach_target()
11560 if (!tgt_prog->jited) { in bpf_check_attach_target()
11562 return -EINVAL; in bpf_check_attach_target()
11564 if (tgt_prog->type == prog->type) { in bpf_check_attach_target()
11570 return -EINVAL; in bpf_check_attach_target()
11572 if (tgt_prog->type == BPF_PROG_TYPE_TRACING && in bpf_check_attach_target()
11574 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY || in bpf_check_attach_target()
11575 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) { in bpf_check_attach_target()
11587 * fentry->extension->fentry->extension beyond in bpf_check_attach_target()
11592 return -EINVAL; in bpf_check_attach_target()
11597 return -EINVAL; in bpf_check_attach_target()
11601 switch (prog->expected_attach_type) { in bpf_check_attach_target()
11606 return -EINVAL; in bpf_check_attach_target()
11611 return -EINVAL; in bpf_check_attach_target()
11613 if (strncmp(prefix, tname, sizeof(prefix) - 1)) { in bpf_check_attach_target()
11616 return -EINVAL; in bpf_check_attach_target()
11618 tname += sizeof(prefix) - 1; in bpf_check_attach_target()
11619 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
11622 return -EINVAL; in bpf_check_attach_target()
11623 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
11626 return -EINVAL; in bpf_check_attach_target()
11633 return -EINVAL; in bpf_check_attach_target()
11635 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
11637 return -EINVAL; in bpf_check_attach_target()
11638 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); in bpf_check_attach_target()
11644 return -EINVAL; in bpf_check_attach_target()
11653 return -EINVAL; in bpf_check_attach_target()
11657 return -EINVAL; in bpf_check_attach_target()
11658 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
11660 return -EINVAL; in bpf_check_attach_target()
11662 if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) && in bpf_check_attach_target()
11663 (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type || in bpf_check_attach_target()
11664 prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type)) in bpf_check_attach_target()
11665 return -EINVAL; in bpf_check_attach_target()
11670 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); in bpf_check_attach_target()
11676 addr = (long) tgt_prog->bpf_func; in bpf_check_attach_target()
11678 addr = (long) tgt_prog->aux->func[subprog]->bpf_func; in bpf_check_attach_target()
11685 return -ENOENT; in bpf_check_attach_target()
11689 if (prog->aux->sleepable) { in bpf_check_attach_target()
11690 ret = -EINVAL; in bpf_check_attach_target()
11691 switch (prog->type) { in bpf_check_attach_target()
11714 } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) { in bpf_check_attach_target()
11717 return -EINVAL; in bpf_check_attach_target()
11728 tgt_info->tgt_addr = addr; in bpf_check_attach_target()
11729 tgt_info->tgt_name = tname; in bpf_check_attach_target()
11730 tgt_info->tgt_type = t; in bpf_check_attach_target()
11736 struct bpf_prog *prog = env->prog; in check_attach_btf_id()
11737 struct bpf_prog *tgt_prog = prog->aux->dst_prog; in check_attach_btf_id()
11739 u32 btf_id = prog->aux->attach_btf_id; in check_attach_btf_id()
11744 if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING && in check_attach_btf_id()
11745 prog->type != BPF_PROG_TYPE_LSM) { in check_attach_btf_id()
11747 return -EINVAL; in check_attach_btf_id()
11750 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) in check_attach_btf_id()
11753 if (prog->type != BPF_PROG_TYPE_TRACING && in check_attach_btf_id()
11754 prog->type != BPF_PROG_TYPE_LSM && in check_attach_btf_id()
11755 prog->type != BPF_PROG_TYPE_EXT) in check_attach_btf_id()
11758 ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info); in check_attach_btf_id()
11762 if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) { in check_attach_btf_id()
11764 * inherit env->ops and expected_attach_type for the rest of the in check_attach_btf_id()
11767 env->ops = bpf_verifier_ops[tgt_prog->type]; in check_attach_btf_id()
11768 prog->expected_attach_type = tgt_prog->expected_attach_type; in check_attach_btf_id()
11772 prog->aux->attach_func_proto = tgt_info.tgt_type; in check_attach_btf_id()
11773 prog->aux->attach_func_name = tgt_info.tgt_name; in check_attach_btf_id()
11776 prog->aux->saved_dst_prog_type = tgt_prog->type; in check_attach_btf_id()
11777 prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type; in check_attach_btf_id()
11780 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) { in check_attach_btf_id()
11781 prog->aux->attach_btf_trace = true; in check_attach_btf_id()
11783 } else if (prog->expected_attach_type == BPF_TRACE_ITER) { in check_attach_btf_id()
11785 return -EINVAL; in check_attach_btf_id()
11789 if (prog->type == BPF_PROG_TYPE_LSM) { in check_attach_btf_id()
11790 ret = bpf_lsm_verify_prog(&env->log, prog); in check_attach_btf_id()
11798 return -ENOMEM; in check_attach_btf_id()
11800 prog->aux->dst_trampoline = tr; in check_attach_btf_id()
11821 int i, len, ret = -EINVAL; in bpf_check()
11826 return -EINVAL; in bpf_check()
11833 return -ENOMEM; in bpf_check()
11834 log = &env->log; in bpf_check()
11836 len = (*prog)->len; in bpf_check()
11837 env->insn_aux_data = in bpf_check()
11839 ret = -ENOMEM; in bpf_check()
11840 if (!env->insn_aux_data) in bpf_check()
11843 env->insn_aux_data[i].orig_idx = i; in bpf_check()
11844 env->prog = *prog; in bpf_check()
11845 env->ops = bpf_verifier_ops[env->prog->type]; in bpf_check()
11854 if (attr->log_level || attr->log_buf || attr->log_size) { in bpf_check()
11858 log->level = attr->log_level; in bpf_check()
11859 log->ubuf = (char __user *) (unsigned long) attr->log_buf; in bpf_check()
11860 log->len_total = attr->log_size; in bpf_check()
11862 ret = -EINVAL; in bpf_check()
11864 if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 || in bpf_check()
11865 !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK) in bpf_check()
11871 verbose(env, "in-kernel BTF is malformed\n"); in bpf_check()
11876 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); in bpf_check()
11878 env->strict_alignment = true; in bpf_check()
11879 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) in bpf_check()
11880 env->strict_alignment = false; in bpf_check()
11882 env->allow_ptr_leaks = bpf_allow_ptr_leaks(); in bpf_check()
11883 env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access(); in bpf_check()
11884 env->bypass_spec_v1 = bpf_bypass_spec_v1(); in bpf_check()
11885 env->bypass_spec_v4 = bpf_bypass_spec_v4(); in bpf_check()
11886 env->bpf_capable = bpf_capable(); in bpf_check()
11889 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; in bpf_check()
11891 if (bpf_prog_is_dev_bound(env->prog->aux)) { in bpf_check()
11892 ret = bpf_prog_offload_verifier_prep(env->prog); in bpf_check()
11897 env->explored_states = kvcalloc(state_htab_size(env), in bpf_check()
11900 ret = -ENOMEM; in bpf_check()
11901 if (!env->explored_states) in bpf_check()
11927 if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux)) in bpf_check()
11931 kvfree(env->explored_states); in bpf_check()
11956 /* do 32-bit optimization after insn patching has done so those patched in bpf_check()
11959 if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) { in bpf_check()
11961 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret in bpf_check()
11968 env->verification_time = ktime_get_ns() - start_time; in bpf_check()
11971 if (log->level && bpf_verifier_log_full(log)) in bpf_check()
11972 ret = -ENOSPC; in bpf_check()
11973 if (log->level && !log->ubuf) { in bpf_check()
11974 ret = -EFAULT; in bpf_check()
11978 if (ret == 0 && env->used_map_cnt) { in bpf_check()
11980 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, in bpf_check()
11981 sizeof(env->used_maps[0]), in bpf_check()
11984 if (!env->prog->aux->used_maps) { in bpf_check()
11985 ret = -ENOMEM; in bpf_check()
11989 memcpy(env->prog->aux->used_maps, env->used_maps, in bpf_check()
11990 sizeof(env->used_maps[0]) * env->used_map_cnt); in bpf_check()
11991 env->prog->aux->used_map_cnt = env->used_map_cnt; in bpf_check()
12003 if (!env->prog->aux->used_maps) in bpf_check()
12012 if (env->prog->type == BPF_PROG_TYPE_EXT) in bpf_check()
12013 env->prog->expected_attach_type = 0; in bpf_check()
12015 *prog = env->prog; in bpf_check()
12019 vfree(env->insn_aux_data); in bpf_check()