/* xref: /linux/tools/testing/selftests/bpf/progs/verifier_precision.c
 * (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
 */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 SUSE LLC */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0xfffffff8 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (87) r2 = -r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 8")
/*
 * Precision backtracking through the BPF_NEG ALU instruction: adding r2
 * to r10 (frame pointer) forces the verifier to mark r2 precise, and the
 * __msg() lines above require the backtracking walk to pass through
 * "r2 = -r2" all the way back to "r2 = 8".
 */
__naked int bpf_neg(void)
{
	asm volatile (
		"r2 = 8;"
		"r2 = -r2;"
		"if r2 != -8 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"		/* r10-based pointer arithmetic -> r2 must be precise */
	"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (d4) r2 = le16 r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
/*
 * Precision backtracking through the BPF_END (to-little-endian, "le16")
 * byte-swap instruction; same structure as bpf_neg() above, with the
 * expected log pinned by the __msg() lines.
 */
__naked int bpf_end_to_le(void)
{
	asm volatile (
		"r2 = 0;"
		"r2 = le16 r2;"
		"if r2 != 0 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"		/* forces r2 precise */
	"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}


SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (dc) r2 = be16 r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
/*
 * Precision backtracking through the BPF_END (to-big-endian, "be16")
 * byte-swap instruction; mirror of bpf_end_to_le() above.
 */
__naked int bpf_end_to_be(void)
{
	asm volatile (
		"r2 = 0;"
		"r2 = be16 r2;"
		"if r2 != 0 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"		/* forces r2 precise */
	"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}

#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
	__clang_major__ >= 18

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (d7) r2 = bswap16 r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
/*
 * Precision backtracking through the unconditional "bswap16" instruction.
 * Gated by the enclosing #if: needs an arch/clang combination that can
 * emit this instruction (clang >= 18 per the guard).
 */
__naked int bpf_end_bswap(void)
{
	asm volatile (
		"r2 = 0;"
		"r2 = bswap16 r2;"
		"if r2 != 0 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"		/* forces r2 precise */
	"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}

#ifdef CAN_USE_LOAD_ACQ_STORE_REL

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (db) r2 = load_acquire((u64 *)(r10 -8))")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
/*
 * Precision backtracking through a load-acquire atomic instruction
 * (emitted as a raw .8byte encoding since there is no asm mnemonic).
 * The __msg() lines require backtracking to transfer the precision mark
 * from r2 to the stack slot at r10-8 and then to r1.
 */
__naked int bpf_load_acquire(void)
{
	asm volatile (
	"r1 = 8;"
	"*(u64 *)(r10 - 8) = r1;"
	".8byte %[load_acquire_insn];" /* r2 = load_acquire((u64 *)(r10 - 8)); */
	"r3 = r10;"
	"r3 += r2;" /* mark_precise */
	"r0 = 0;"
	"exit;"
	:
	: __imm_insn(load_acquire_insn,
		     BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -8))
	: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r2 = r10")
__msg("mark_precise: frame0: regs=r1 stack= before 2: (79) r1 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (db) store_release((u64 *)(r10 -8), r1)")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
/*
 * Precision backtracking through a store-release atomic instruction
 * (raw .8byte encoding). Backtracking must move the precision mark from
 * the reloaded r1 to stack slot r10-8 and back to the original r1.
 */
__naked int bpf_store_release(void)
{
	asm volatile (
	"r1 = 8;"
	".8byte %[store_release_insn];" /* store_release((u64 *)(r10 - 8), r1); */
	"r1 = *(u64 *)(r10 - 8);"
	"r2 = r10;"
	"r2 += r1;" /* mark_precise */
	"r0 = 0;"
	"exit;"
	:
	: __imm_insn(store_release_insn,
		     BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -8))
	: __clobber_all);
}

#endif /* CAN_USE_LOAD_ACQ_STORE_REL */
#endif /* v4 instruction */

SEC("?raw_tp")
__success __log_level(2)
/*
 * Without the bug fix there will be no history between "last_idx 3 first_idx 3"
 * and "parent state regs=" lines. "R0_w=6" parts are here to help anchor
 * expected log messages to the one specific mark_chain_precision operation.
 *
 * This is quite fragile: if verifier checkpointing heuristic changes, this
 * might need adjusting.
 */
__msg("2: (07) r0 += 1                       ; R0_w=6")
__msg("3: (35) if r0 >= 0xa goto pc+1")
__msg("mark_precise: frame0: last_idx 3 first_idx 3 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 2: (07) r0 += 1")
__msg("mark_precise: frame0: regs=r0 stack= before 1: (07) r0 += 1")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (05) goto pc-4")
__msg("mark_precise: frame0: regs=r0 stack= before 3: (35) if r0 >= 0xa goto pc+1")
__msg("mark_precise: frame0: parent state regs= stack=:  R0_rw=P4")
__msg("3: R0_w=6")
/* Regression test: precision backtracking across a loop checkpoint whose
 * first_idx equals last_idx (see comment inside the loop below).
 */
__naked int state_loop_first_last_equal(void)
{
	asm volatile (
		"r0 = 0;"
	"l0_%=:"
		"r0 += 1;"
		"r0 += 1;"
		/* every few iterations we'll have a checkpoint here with
		 * first_idx == last_idx, potentially confusing precision
		 * backtracking logic
		 */
		"if r0 >= 10 goto l1_%=;"	/* checkpoint + mark_precise */
		"goto l0_%=;"
	"l1_%=:"
		"exit;"
		::: __clobber_common
	);
}

/*
 * Callee for bpf_cond_op_r10() below: a sequence of conditional jumps,
 * one of which compares a register against r10 (the frame pointer).
 * The caller's __msg() expectations check how precision backtracking
 * handles these instructions in frame1.
 */
__used __naked static void __bpf_cond_op_r10(void)
{
	asm volatile (
	"r2 = 2314885393468386424 ll;"
	"goto +0;"
	"if r2 <= r10 goto +3;"		/* conditional jump involving r10 */
	"if r1 >= -1835016 goto +0;"
	"if r2 <= 8 goto +0;"
	"if r3 <= 0 goto +0;"
	"exit;"
	::: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("8: (bd) if r2 <= r10 goto pc+3")
__msg("9: (35) if r1 >= 0xffe3fff8 goto pc+0")
__msg("10: (b5) if r2 <= 0x8 goto pc+0")
__msg("mark_precise: frame1: last_idx 10 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame1: regs=r2 stack= before 9: (35) if r1 >= 0xffe3fff8 goto pc+0")
__msg("mark_precise: frame1: regs=r2 stack= before 8: (bd) if r2 <= r10 goto pc+3")
__msg("mark_precise: frame1: regs=r2 stack= before 7: (05) goto pc+0")
/*
 * Precision backtracking through a conditional jump that uses r10 as an
 * operand, inside a called subprogram (frame1). The actual instruction
 * sequence lives in __bpf_cond_op_r10() above.
 */
__naked void bpf_cond_op_r10(void)
{
	asm volatile (
	"r3 = 0 ll;"
	"call __bpf_cond_op_r10;"
	"r0 = 0;"
	"exit;"
	::: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("3: (bf) r3 = r10")
__msg("4: (bd) if r3 <= r2 goto pc+1")
__msg("5: (b5) if r2 <= 0x8 goto pc+2")
__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bd) if r3 <= r2 goto pc+1")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10")
/*
 * Counterpart of bpf_cond_op_r10(): here the frame-pointer value is first
 * copied into r3 ("r3 = r10") and the conditional jump compares r3 against
 * r2, so r10 itself is not an operand of the branch.
 */
__naked void bpf_cond_op_not_r10(void)
{
	asm volatile (
	"r0 = 0;"
	"r2 = 2314885393468386424 ll;"
	"r3 = r10;"
	"if r3 <= r2 goto +1;"
	"if r2 <= 8 goto +2;"
	"r0 = 2 ll;"
	"exit;"
	::: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("0: (b7) r0 = 1                        ; R0_w=1")
__msg("1: (84) w0 = -w0                      ; R0_w=0xffffffff")
__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 1: (84) w0 = -w0")
__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1")
/*
 * 32-bit BPF_NEG ("w0 = -w0") tracked precisely through the return-value
 * range check done for this program type; expected tracked value is
 * R0_w=0xffffffff per the __msg() above.
 */
__naked int bpf_neg_2(void)
{
	/*
	 * lsm.s/socket_connect requires a return value within [-4095, 0].
	 * Returning -1 is allowed
	 */
	asm volatile (
	"r0 = 1;"
	"w0 = -w0;"
	"exit;"
	::: __clobber_all);
}

SEC("?raw_tp")
__failure __msg("At program exit the register R0 has")
/*
 * Negative test for the 32-bit BPF_NEG case: the negated value falls
 * outside the allowed return range, so the verifier must reject the
 * program with the R0 range error matched above.
 */
__naked int bpf_neg_3(void)
{
	/*
	 * lsm.s/socket_connect requires a return value within [-4095, 0].
	 * Returning -10000 is not allowed.
	 */
	asm volatile (
	"r0 = 10000;"
	"w0 = -w0;"
	"exit;"
	::: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("0: (b7) r0 = 1                        ; R0_w=1")
__msg("1: (87) r0 = -r0                      ; R0_w=-1")
__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 1: (87) r0 = -r0")
__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1")
/*
 * 64-bit counterpart of bpf_neg_2(): "r0 = -r0" must be tracked as
 * R0_w=-1 and backtracked precisely, per the __msg() lines above.
 */
__naked int bpf_neg_4(void)
{
	/*
	 * lsm.s/socket_connect requires a return value within [-4095, 0].
	 * Returning -1 is allowed
	 */
	asm volatile (
	"r0 = 1;"
	"r0 = -r0;"
	"exit;"
	::: __clobber_all);
}

SEC("?raw_tp")
__failure __msg("At program exit the register R0 has")
/*
 * Negative test for the 64-bit BPF_NEG case: mirror of bpf_neg_3(),
 * the verifier must reject the out-of-range negated return value.
 */
__naked int bpf_neg_5(void)
{
	/*
	 * lsm.s/socket_connect requires a return value within [-4095, 0].
	 * Returning -10000 is not allowed.
	 */
	asm volatile (
	"r0 = 10000;"
	"r0 = -r0;"
	"exit;"
	::: __clobber_all);
}

char _license[] SEC("license") = "GPL";