Lines matching "log" — full-text search results from the BPF verifier log code

static bool bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
	/* user buffer and its size must be provided together */
	if (!!log->ubuf != !!log->len_total)
	/* log buf without log_level is meaningless */
	if (log->ubuf && log->level == 0)
	/* only known log level bits may be set */
	if (log->level & ~BPF_LOG_MASK)
	/* cap the size of the user buffer */
	if (log->len_total > UINT_MAX >> 2)

int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
	log->level = log_level;
	log->ubuf = log_buf;
	log->len_total = log_size;

	/* log attributes have to be sane */
	if (!bpf_verifier_log_attr_valid(log))

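Taken together, these checks define when a (ubuf, len_total, level) triple is acceptable. As a rough standalone illustration (not the kernel code; the struct, the mask value, and the names below are simplified stand-ins), the same rules can be expressed like this:

#include <stdbool.h>
#include <stdint.h>

/* simplified stand-in for struct bpf_verifier_log */
struct vlog_attrs {
	char	*ubuf;		/* user-supplied log buffer, may be NULL */
	uint32_t len_total;	/* size of ubuf in bytes */
	uint32_t level;		/* requested log level bits */
};

#define VLOG_LEVEL_MASK	0x7u	/* illustrative stand-in for BPF_LOG_MASK */

static bool vlog_attrs_valid(const struct vlog_attrs *a)
{
	/* buffer and size must be given together (both set or both unset) */
	if (!!a->ubuf != !!a->len_total)
		return false;
	/* a log buffer without a log level is meaningless */
	if (a->ubuf && a->level == 0)
		return false;
	/* no unknown level bits */
	if (a->level & ~VLOG_LEVEL_MASK)
		return false;
	/* keep the buffer size well below UINT32_MAX, as above */
	if (a->len_total > (UINT32_MAX >> 2))
		return false;
	return true;
}

int main(void)
{
	char buf[256];
	struct vlog_attrs ok  = { .ubuf = buf, .len_total = sizeof(buf), .level = 1 };
	struct vlog_attrs bad = { .ubuf = buf, .len_total = sizeof(buf), .level = 0 };

	return vlog_attrs_valid(&ok) && !vlog_attrs_valid(&bad) ? 0 : 1;
}
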
static void bpf_vlog_update_len_max(struct bpf_verifier_log *log, u32 add_len)
	u64 len = log->end_pos + add_len;

	/* log->len_max could be larger than our current len due to earlier
	 * bpf_vlog_reset() calls, so keep track of the maximum length ever
	 * reached
	 */
	if (len > UINT_MAX)
		log->len_max = UINT_MAX;
	else if (len > log->len_max)
		log->len_max = len;

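The length bookkeeping here is done in 64 bits and clamped, so repeated resets or a very large log cannot overflow the 32-bit len_max counter. A minimal userspace equivalent of that clamping (names are illustrative):

#include <stdint.h>

/* track the largest logical log length ever reached, saturating at UINT32_MAX */
static void update_len_max(uint32_t *len_max, uint64_t end_pos, uint32_t add_len)
{
	uint64_t len = end_pos + add_len;

	if (len > UINT32_MAX)
		*len_max = UINT32_MAX;
	else if (len > *len_max)
		*len_max = (uint32_t)len;
}

int main(void)
{
	uint32_t len_max = 0;

	update_len_max(&len_max, 100, 28);	/* len_max becomes 128 */
	update_len_max(&len_max, 50, 10);	/* stays 128: 60 < 128 */
	return len_max == 128 ? 0 : 1;
}
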
void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	/* BPF_LOG_KERNEL level is routed to the kernel log instead of ubuf */
	if (log->level == BPF_LOG_KERNEL) {
		bool newline = n > 0 && log->kbuf[n - 1] == '\n';

		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");

	bpf_vlog_update_len_max(log, n);

	if (log->level & BPF_LOG_FIXED) {
		/* fixed mode: append only while the user buffer has room */
		if (log->end_pos < log->len_total) {
			new_n = min_t(u32, log->len_total - log->end_pos, n);
			log->kbuf[new_n - 1] = '\0';

		cur_pos = log->end_pos;
		log->end_pos += n - 1; /* don't count terminating '\0' */

		if (log->ubuf && new_n &&
		    copy_to_user(log->ubuf + cur_pos, log->kbuf, new_n))
	} else {
		/* rotating (default) mode: keep only the newest len_total bytes */
		new_end = log->end_pos + n;
		if (new_end - log->start_pos >= log->len_total)
			new_start = new_end - log->len_total;
		else
			new_start = log->start_pos;

		log->start_pos = new_start;
		log->end_pos = new_end - 1; /* don't count terminating '\0' */

		if (!log->ubuf)

		new_n = min(n, log->len_total);

		/* map logical positions onto circular-buffer offsets */
		div_u64_rem(cur_pos, log->len_total, &buf_start);
		div_u64_rem(new_end, log->len_total, &buf_end);
		/* buf_end is an exclusive index, so 0 really means len_total */
		if (buf_end == 0)
			buf_end = log->len_total;

		/* message fits in one contiguous chunk of ubuf */
		if (copy_to_user(log->ubuf + buf_start,
				 log->kbuf + n - new_n,
		/* message wraps around the end of ubuf, copy in two chunks */
		if (copy_to_user(log->ubuf + buf_start,
				 log->kbuf + n - new_n,
				 log->len_total - buf_start))
		if (copy_to_user(log->ubuf,
				 log->kbuf + n - buf_end,

	/* if copying into the user buffer ever fails, stop mirroring to it */
	log->ubuf = NULL;

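The rotating (non-BPF_LOG_FIXED) branch keeps start_pos/end_pos growing monotonically while the bytes themselves live in a circular window of len_total bytes, split into at most two copies when the window wraps. Below is a self-contained userspace model of that bookkeeping, with made-up names and without the terminating-'\0' accounting the kernel code does:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Userspace model of the rotating log window: start_pos/end_pos grow without
 * bound, while the bytes live in a circular buffer of len_total bytes, so only
 * the newest len_total bytes are ever retained.
 */
struct rotlog {
	char	*buf;		/* stands in for the user buffer (log->ubuf) */
	uint32_t len_total;
	uint64_t start_pos;	/* logical position of the oldest retained byte */
	uint64_t end_pos;	/* logical position one past the newest byte */
};

static void rotlog_append(struct rotlog *log, const char *data, uint32_t n)
{
	uint64_t new_end = log->end_pos + n;
	uint64_t new_start;
	uint32_t new_n, buf_start, buf_end;

	if (!n)
		return;

	/* slide the window forward once more than len_total bytes were logged */
	if (new_end - log->start_pos >= log->len_total)
		new_start = new_end - log->len_total;
	else
		new_start = log->start_pos;
	log->start_pos = new_start;
	log->end_pos = new_end;

	/* at most the last len_total bytes of this message can survive */
	new_n = n < log->len_total ? n : log->len_total;

	/* map logical positions onto physical circular-buffer offsets */
	buf_start = (uint32_t)((new_end - new_n) % log->len_total);
	buf_end = (uint32_t)(new_end % log->len_total);
	if (buf_end == 0)
		buf_end = log->len_total;

	if (buf_start < buf_end) {
		/* message fits in one contiguous chunk */
		memcpy(log->buf + buf_start, data + n - new_n, new_n);
	} else {
		/* message wraps around the end: copy tail, then head */
		uint32_t tail = log->len_total - buf_start;

		memcpy(log->buf + buf_start, data + n - new_n, tail);
		memcpy(log->buf, data + n - new_n + tail, new_n - tail);
	}
}

int main(void)
{
	char storage[8];
	struct rotlog log = { .buf = storage, .len_total = sizeof(storage) };

	rotlog_append(&log, "hello, ", 7);
	rotlog_append(&log, "world", 5);

	/* window now covers logical positions [4, 12), i.e. "o, world" */
	printf("start=%llu end=%llu\n",
	       (unsigned long long)log.start_pos,
	       (unsigned long long)log.end_pos);
	return 0;
}
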
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos)
	if (WARN_ON_ONCE(new_pos > log->end_pos))

	if (!bpf_verifier_log_needed(log) || log->level == BPF_LOG_KERNEL)

	/* if the position to which we reset is beyond the current log window,
	 * nothing useful was preserved, so pull start_pos back as well to end
	 * up with an empty log (start_pos == end_pos)
	 */
	log->end_pos = new_pos;
	if (log->end_pos < log->start_pos)
		log->start_pos = log->end_pos;

	if (!log->ubuf)

	if (log->level & BPF_LOG_FIXED)
		pos = log->end_pos + 1;
	else
		div_u64_rem(new_pos, log->len_total, &pos);

	/* re-terminate the (possibly shortened) log in the user buffer */
	if (pos < log->len_total && put_user(zero, log->ubuf + pos))
		log->ubuf = NULL;

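As a concrete illustration of the two modes (numbers made up): with len_total = 8, resetting a rotating log to new_pos = 12 re-terminates the user buffer at offset 12 % 8 = 4, whereas in BPF_LOG_FIXED mode the '\0' goes at end_pos + 1; in both cases it is only written if that offset still lies within len_total.
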
static int bpf_vlog_reverse_ubuf(struct bpf_verifier_log *log, int start, int end)
	/* we split log->kbuf into two equal parts for both ends of array */
	int n = sizeof(log->kbuf) / 2, nn;
	char *lbuf = log->kbuf, *rbuf = log->kbuf + n;

	/* read a chunk from each end of the user range ... */
	if (copy_from_user(lbuf, log->ubuf + start, nn))
	if (copy_from_user(rbuf, log->ubuf + end - nn, nn))

	/* ... and write each chunk back, reversed, to the opposite end */
	if (copy_to_user(log->ubuf + start, rbuf, nn))
	if (copy_to_user(log->ubuf + end - nn, lbuf, nn))

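bpf_vlog_reverse_ubuf() reverses a [start, end) range of the user buffer using only the small kernel bounce buffer, and bpf_vlog_finalize() below chains three such reversals to rotate the log into place. Here is a standalone sketch of that rotate-left-by-three-reversals trick on an ordinary in-memory buffer (the chunked copy_{from,to}_user handling is omitted):

#include <stdio.h>
#include <string.h>

/* reverse buf[start, end) in place */
static void reverse(char *buf, size_t start, size_t end)
{
	while (start + 1 < end) {
		char tmp = buf[start];

		buf[start++] = buf[--end];
		buf[end] = tmp;
	}
}

/* rotate buf left by `shift` bytes using three reversals */
static void rotate_left(char *buf, size_t len, size_t shift)
{
	size_t sublen = len - (shift % len);	/* same role as sublen below */

	reverse(buf, 0, len);
	reverse(buf, 0, sublen);
	reverse(buf, sublen, len);
}

int main(void)
{
	char buf[] = "o, worldhell";	/* "hello, world" wrapped at offset 8 */

	/* start_pos % len_total == 8, so shift the contents left by 8 */
	rotate_left(buf, strlen(buf), 8);
	printf("%s\n", buf);		/* prints "hello, world" */
	return 0;
}
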
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual)
	if (!log || log->level == 0 || log->level == BPF_LOG_KERNEL)

	if (!log->ubuf)

	/* If we never truncated log, there is nothing to move around. */
	if (log->start_pos == 0)

	/* Otherwise we need to rotate log contents to make it start from the
	 * beginning of the user buffer. Note that if log->start_pos != 0 then
	 * we definitely filled up the entire log, so we just need to shift its
	 * contents to the left by (log->start_pos % log->len_total) bytes.
	 *
	 * Let's say we have a log buffer that has to be shifted left by 7
	 * bytes. We'll utilize log->kbuf to read user memory chunk by chunk,
	 * swap the chunks, and write them back: the shift is performed as
	 * three reversals of the user buffer.
	 */
	/* sublen = number of bytes that must end up at the front of the buffer */
	div_u64_rem(log->start_pos, log->len_total, &sublen);
	sublen = log->len_total - sublen;

	err = bpf_vlog_reverse_ubuf(log, 0, log->len_total);
	err = err ?: bpf_vlog_reverse_ubuf(log, 0, sublen);
	err = err ?: bpf_vlog_reverse_ubuf(log, sublen, log->len_total);
		log->ubuf = NULL;

	*log_size_actual = log->len_max;

	/* a properly initialized log has either both ubuf != NULL and
	 * len_total > 0, or neither; anything else indicates a bug
	 */
	if (!!log->ubuf != !!log->len_total)

	/* the log was truncated: the caller's buffer was too small */
	if (log->ubuf && log->len_max > log->len_total)

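On the consumer side, the len_max value reported through *log_size_actual (and the failure when it exceeds len_total) lets a caller retry with a buffer that is exactly big enough. A hypothetical illustration of that pattern, with try_load() as a made-up stand-in for whatever fills the log:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical loader stand-in: like bpf_vlog_finalize(), it reports through
 * *log_size_actual how many bytes the complete log would need, and fails with
 * -ENOSPC when the supplied buffer was too small. Only the retry pattern in
 * main() is the point of this sketch.
 */
static int try_load(char *log_buf, size_t log_size, size_t *log_size_actual)
{
	const char *full_log = "0: (b7) r0 = 0\n1: (95) exit\nprocessed 2 insns\n";
	size_t needed = strlen(full_log) + 1;

	*log_size_actual = needed;
	if (log_size < needed) {
		if (log_size)
			snprintf(log_buf, log_size, "%s", full_log);
		return -ENOSPC;
	}
	memcpy(log_buf, full_log, needed);
	return 0;
}

int main(void)
{
	size_t size = 16, needed = 0;
	char *buf = malloc(size);
	int err;

	if (!buf)
		return 1;

	err = try_load(buf, size, &needed);
	if (err == -ENOSPC && needed > size) {
		/* retry once with the size the first attempt said it needed */
		char *bigger = realloc(buf, needed);

		if (!bigger) {
			free(buf);
			return 1;
		}
		buf = bigger;
		err = try_load(buf, needed, &needed);
	}
	if (!err)
		fputs(buf, stdout);
	free(buf);
	return err ? 1 : 0;
}
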
In bpf_verifier_log_write():

	/* bpf_verifier_log_write() is used to dump the verification trace to
	 * the log, so the user can figure out what's wrong with the program
	 */
	if (!bpf_verifier_log_needed(&env->log))

	bpf_verifier_vlog(&env->log, fmt, args);

__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
	if (!bpf_verifier_log_needed(log))

	bpf_verifier_vlog(log, fmt, args);

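Both bpf_log() and bpf_verifier_log_write() follow the same shape: bail out early if logging isn't needed, then forward the variadic arguments to the vprintf-style bpf_verifier_vlog(). The generic C pattern, with illustrative names:

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

static bool log_needed = true;

/* vprintf-style worker, the role bpf_verifier_vlog() plays in the kernel */
static void my_vlog(const char *fmt, va_list args)
{
	vfprintf(stderr, fmt, args);
}

/* printf-style wrapper: check first, then forward the va_list */
static void my_log(const char *fmt, ...)
{
	va_list args;

	if (!log_needed)
		return;

	va_start(args, fmt);
	my_vlog(fmt, args);
	va_end(args);
}

int main(void)
{
	my_log("insn %d: %s\n", 3, "r1 = r2");
	return 0;
}
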
In verbose_linfo():

	if (!bpf_verifier_log_needed(&env->log))

	bpf_verifier_vlog(&env->log, prefix_fmt, args);

In print_insn_state():

	if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) {
		bpf_vlog_reset(&env->log, env->prev_log_pos - 1);

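If end_pos has not advanced past the remembered prev_log_pos, nothing was logged since that point, so the log is rewound to prev_log_pos - 1, presumably to drop the previous line's trailing newline before the state is printed again.
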