// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/var_off.c */
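/* Exercises the verifier's handling of variable-offset ("var_off") stack and
 * ctx accesses: direct reads and writes, indirect (helper) accesses, and the
 * differing treatment of privileged vs unprivileged program loads.
 */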

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

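/* Minimal 8-byte key/value hash map; it only serves as a lookup target for
 * the bpf_map_lookup_elem() calls in the indirect-access tests below.
 */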
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

SEC("lwt_in")
__description("variable-offset ctx access")
__failure __msg("variable ctx access var_off=(0x0; 0x4)")
__naked void variable_offset_ctx_access(void)
{
	asm volatile (" \
	/* Get an unknown value */ \
	r2 = *(u32*)(r1 + 0); \
	/* Make it small and 4-byte aligned */ \
	r2 &= 4; \
	/* add it to skb. We now have either &skb->len or \
	 * &skb->pkt_type, but we don't know which \
	 */ \
	r1 += r2; \
	/* dereference it */ \
	r0 = *(u32*)(r1 + 0); \
	exit; \
"	::: __clobber_all);
}

SEC("cgroup/skb")
__description("variable-offset stack read, priv vs unpriv")
__success __failure_unpriv
__msg_unpriv("R2 variable stack access prohibited for !root")
__retval(0)
__naked void stack_read_priv_vs_unpriv(void)
{
	asm volatile (" \
	/* Fill the top 8 bytes of the stack */ \
	r0 = 0; \
	*(u64*)(r10 - 8) = r0; \
	/* Get an unknown value */ \
	r2 = *(u32*)(r1 + 0); \
	/* Make it small and 4-byte aligned */ \
	r2 &= 4; \
	r2 -= 8; \
	/* add it to fp. We now have either fp-4 or fp-8, but \
	 * we don't know which \
	 */ \
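	/* Either candidate lies within the 8 bytes filled above, so the \
	 * read below is accepted for privileged loads; for unprivileged \
	 * (!root) loads the verifier rejects any variable-offset stack \
	 * access outright. \
	 */ \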
	r2 += r10; \
	/* dereference it for a stack read */ \
	r0 = *(u32*)(r2 + 0); \
	r0 = 0; \
	exit; \
"	::: __clobber_all);
}

SEC("lwt_in")
__description("variable-offset stack read, uninitialized")
__failure __msg("invalid variable-offset read from stack R2")
__naked void variable_offset_stack_read_uninitialized(void)
{
	asm volatile (" \
	/* Get an unknown value */ \
	r2 = *(u32*)(r1 + 0); \
	/* Make it small and 4-byte aligned */ \
	r2 &= 4; \
	r2 -= 8; \
	/* add it to fp. We now have either fp-4 or fp-8, but \
	 * we don't know which \
	 */ \
	r2 += r10; \
	/* dereference it for a stack read */ \
	r0 = *(u32*)(r2 + 0); \
	r0 = 0; \
	exit; \
"	::: __clobber_all);
}

SEC("socket")
__description("variable-offset stack write, priv vs unpriv")
__success __failure_unpriv
/* Variable stack access is rejected for unprivileged. */
__msg_unpriv("R2 variable stack access prohibited for !root")
__retval(0)
__naked void stack_write_priv_vs_unpriv(void)
{
	asm volatile (" \
	/* Get an unknown value */ \
	r2 = *(u32*)(r1 + 0); \
	/* Make it small and 8-byte aligned */ \
	r2 &= 8; \
	r2 -= 16; \
	/* Add it to fp. We now have either fp-8 or fp-16, but \
	 * we don't know which \
	 */ \
	r2 += r10; \
	/* Dereference it for a stack write */ \
	r0 = 0; \
	*(u64*)(r2 + 0) = r0; \
	/* Now read from the address we just wrote. This shows \
	 * that, after a variable-offset write, a privileged \
	 * program can read the slots that were in the range of \
	 * that write (even if the verifier doesn't actually know \
	 * whether the slot being read was really written to or not). \
	 */ \
	r3 = *(u64*)(r2 + 0); \
	r0 = 0; \
	exit; \
"	::: __clobber_all);
}

SEC("socket")
__description("variable-offset stack write clobbers spilled regs")
__failure
/* In the privileged case, dereferencing a spilled-and-then-filled
 * register is rejected because the previous variable-offset stack
 * write might have overwritten the spilled pointer (i.e. we lose track
 * of the spilled register when we analyze the write).
 */
__msg("R2 invalid mem access 'scalar'")
__failure_unpriv
/* The unprivileged case is not too interesting; variable
 * stack access is rejected.
 */
__msg_unpriv("R2 variable stack access prohibited for !root")
__naked void stack_write_clobbers_spilled_regs(void)
{
	asm volatile (" \
	/* Dummy instruction; needed because we need to patch the next one \
	 * and we can't patch the first instruction. \
	 */ \
	r6 = 0; \
	/* Make R0 a map ptr */ \
	r0 = %[map_hash_8b] ll; \
	/* Get an unknown value */ \
	r2 = *(u32*)(r1 + 0); \
	/* Make it small and 8-byte aligned */ \
	r2 &= 8; \
	r2 -= 16; \
	/* Add it to fp. We now have either fp-8 or fp-16, but \
	 * we don't know which. \
	 */ \
	r2 += r10; \
	/* Spill R0(map ptr) into stack */ \
	*(u64*)(r10 - 8) = r0; \
	/* Dereference the unknown value for a stack write */ \
	r0 = 0; \
	*(u64*)(r2 + 0) = r0; \
	/* Fill the register back into R2 */ \
	r2 = *(u64*)(r10 - 8); \
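	/* Because of the variable-offset write above, the verifier no \
	 * longer tracks this slot as holding the spilled map pointer, \
	 * so R2 is now just an unknown scalar. \
	 */ \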
	/* Try to dereference R2 for a memory load */ \
	r0 = *(u64*)(r2 + 8); \
	exit; \
"	:
	: __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("sockops")
__description("indirect variable-offset stack access, unbounded")
__failure __msg("invalid unbounded variable-offset indirect access to stack R4")
__naked void variable_offset_stack_access_unbounded(void)
{
	asm volatile (" \
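	/* bpf_getsockopt(ctx, level, optname, optval, optlen): r2 is the \
	 * level (6 == IPPROTO_TCP) and r3 the optname; r4 and r5 are set \
	 * below to the stack optval pointer and its length. \
	 */ \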
	r2 = 6; \
	r3 = 28; \
	/* Fill the top 16 bytes of the stack. */ \
	r4 = 0; \
	*(u64*)(r10 - 16) = r4; \
	r4 = 0; \
	*(u64*)(r10 - 8) = r4; \
	/* Get an unknown value. */ \
	r4 = *(u64*)(r1 + %[bpf_sock_ops_bytes_received]); \
	/* Check the lower bound but don't check the upper one. */ \
	if r4 s< 0 goto l0_%=; \
	/* Point the lower bound to initialized stack. Offset is now in range \
	 * from fp-16 to fp+0x7fffffffffffffef, i.e. max value is unbounded. \
	 */ \
	r4 -= 16; \
	r4 += r10; \
	r5 = 8; \
	/* Dereference it indirectly. */ \
	call %[bpf_getsockopt]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_getsockopt),
	  __imm_const(bpf_sock_ops_bytes_received, offsetof(struct bpf_sock_ops, bytes_received))
	: __clobber_all);
}

SEC("lwt_in")
__description("indirect variable-offset stack access, max out of bound")
__failure __msg("invalid variable-offset indirect access to stack R2")
__naked void access_max_out_of_bound(void)
{
	asm volatile (" \
	/* Fill the top 8 bytes of the stack */ \
	r2 = 0; \
	*(u64*)(r10 - 8) = r2; \
	/* Get an unknown value */ \
	r2 = *(u32*)(r1 + 0); \
	/* Make it small and 4-byte aligned */ \
	r2 &= 4; \
	r2 -= 8; \
	/* add it to fp. We now have either fp-4 or fp-8, but \
	 * we don't know which \
	 */ \
	r2 += r10; \
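	/* With an 8-byte map key, the fp-4 candidate would read past the \
	 * top of the stack frame, hence "max out of bound". \
	 */ \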
	/* dereference it indirectly */ \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("lwt_in")
__description("indirect variable-offset stack access, min out of bound")
__failure __msg("invalid variable-offset indirect access to stack R2")
__naked void access_min_out_of_bound(void)
{
	asm volatile (" \
	/* Fill the top 8 bytes of the stack */ \
	r2 = 0; \
	*(u64*)(r10 - 8) = r2; \
	/* Get an unknown value */ \
	r2 = *(u32*)(r1 + 0); \
	/* Make it small and 4-byte aligned */ \
	r2 &= 4; \
	r2 -= 516; \
	/* add it to fp. We now have either fp-516 or fp-512, but \
	 * we don't know which \
	 */ \
	r2 += r10; \
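	/* The fp-516 candidate starts below the lowest valid stack \
	 * offset (fp-512), hence "min out of bound". \
	 */ \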
	/* dereference it indirectly */ \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("lwt_in")
__description("indirect variable-offset stack access, min_off < min_initialized")
__failure __msg("invalid indirect read from stack R2 var_off")
__naked void access_min_off_min_initialized(void)
{
	asm volatile (" \
	/* Fill only the top 8 bytes of the stack. */ \
	r2 = 0; \
	*(u64*)(r10 - 8) = r2; \
	/* Get an unknown value */ \
	r2 = *(u32*)(r1 + 0); \
	/* Make it small and 4-byte aligned. */ \
	r2 &= 4; \
	r2 -= 16; \
	/* Add it to fp. We now have either fp-12 or fp-16, but we don't \
	 * know which; either way the 8-byte access reaches into stack \
	 * that was never initialized (only the top 8 bytes were filled). \
	 */ \
	r2 += r10; \
	/* Dereference it indirectly. */ \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("cgroup/skb")
__description("indirect variable-offset stack access, priv vs unpriv")
__success __failure_unpriv
__msg_unpriv("R2 variable stack access prohibited for !root")
__retval(0)
__naked void stack_access_priv_vs_unpriv(void)
{
	asm volatile (" \
	/* Fill the top 16 bytes of the stack. */ \
	r2 = 0; \
	*(u64*)(r10 - 16) = r2; \
	r2 = 0; \
	*(u64*)(r10 - 8) = r2; \
	/* Get an unknown value. */ \
	r2 = *(u32*)(r1 + 0); \
	/* Make it small and 4-byte aligned. */ \
	r2 &= 4; \
	r2 -= 16; \
	/* Add it to fp. We now have either fp-12 or fp-16, we don't know \
	 * which, but either way it points to initialized stack. \
	 */ \
	r2 += r10; \
	/* Dereference it indirectly. */ \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("lwt_in")
__description("indirect variable-offset stack access, ok")
__success __retval(0)
__naked void variable_offset_stack_access_ok(void)
{
	asm volatile (" \
	/* Fill the top 16 bytes of the stack. */ \
	r2 = 0; \
	*(u64*)(r10 - 16) = r2; \
	r2 = 0; \
	*(u64*)(r10 - 8) = r2; \
	/* Get an unknown value. */ \
	r2 = *(u32*)(r1 + 0); \
	/* Make it small and 4-byte aligned. */ \
	r2 &= 4; \
	r2 -= 16; \
	/* Add it to fp. We now have either fp-12 or fp-16, we don't know \
	 * which, but either way it points to initialized stack. \
	 */ \
	r2 += r10; \
	/* Dereference it indirectly. */ \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";