1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/bpf.h>
4 #include <bpf/bpf_helpers.h>
5 #include "bpf_misc.h"
6 
/* Small array map (8 x u64) used as the iteration target for
 * bpf_for_each_map_elem() in the tests below.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 8);
	__type(key, __u32);
	__type(value, __u64);
} map SEC(".maps");
13 
/* User ring buffer consumed by bpf_user_ringbuf_drain() in the tests below. */
struct {
	__uint(type, BPF_MAP_TYPE_USER_RINGBUF);
	__uint(max_entries, 8);
} ringbuf SEC(".maps");
18 
/* Forward declarations only: the callbacks below receive these as opaque
 * pointers and never dereference them, so full definitions are not needed.
 */
struct vm_area_struct;
struct bpf_map;

/* Callback context carrying a buffer pointer across bpf_loop() iterations. */
struct buf_context {
	char *buf;
};

/* Generic numeric callback context shared by most tests in this file. */
struct num_context {
	__u64 i;
	__u64 j;
};

/* Two-byte global array. Indexing it with a callback-controlled value is how
 * these tests observe verifier bounds tracking: the expected error messages
 * "invalid access to map value, value_size=2 ..." refer to this array.
 */
__u8 choice_arr[2] = { 0, 1 };
32 
/* On iteration 0 this replaces ctx->buf with a raw scalar (0xDEAD); on any
 * later iteration it passes that value to bpf_probe_read_user() as the
 * destination buffer. The verifier must track the pointer-to-scalar
 * transition across iterations and reject the helper call.
 */
static int unsafe_on_2nd_iter_cb(__u32 idx, struct buf_context *ctx)
{
	if (idx == 0) {
		/* Poison the buffer pointer for subsequent iterations. */
		ctx->buf = (char *)(0xDEAD);
		return 0;
	}

	if (bpf_probe_read_user(ctx->buf, 8, (void *)(0xBADC0FFEE)))
		return 1;

	return 0;
}
45 
SEC("?raw_tp")
__failure __msg("R1 type=scalar expected=fp")
int unsafe_on_2nd_iter(void *unused)
{
	char buf[4];
	struct buf_context loop_ctx = { .buf = buf };

	/* The callback clobbers loop_ctx.buf with a scalar on iteration 0,
	 * so a later iteration reads through a non-pointer; the verifier is
	 * expected to reject the program with the message above.
	 */
	bpf_loop(100, unsafe_on_2nd_iter_cb, &loop_ctx, 0);
	return 0;
}
56 
/* Resets ctx->i to a safe array index — but only if the callback runs. */
static int unsafe_on_zero_iter_cb(__u32 idx, struct num_context *ctx)
{
	ctx->i = 0;
	return 0;
}
62 
SEC("?raw_tp")
__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
int unsafe_on_zero_iter(void *unused)
{
	struct num_context loop_ctx = { .i = 32 };

	/* bpf_loop() may invoke the callback zero times, so the verifier
	 * must still account for the initial i == 32, which is out of
	 * bounds for the 2-byte choice_arr (off=32 in the message above).
	 */
	bpf_loop(100, unsafe_on_zero_iter_cb, &loop_ctx, 0);
	return choice_arr[loop_ctx.i];
}
72 
/* Increments only ctx->i, leaving ctx->j untouched across iterations. */
static int widening_cb(__u32 idx, struct num_context *ctx)
{
	++ctx->i;
	return 0;
}
78 
SEC("?raw_tp")
__success
int widening(void *unused)
{
	struct num_context loop_ctx = { .i = 0, .j = 1 };

	bpf_loop(100, widening_cb, &loop_ctx, 0);
	/* loop_ctx.j is not changed during callback iteration,
	 * verifier should not apply widening to it: j must still be known
	 * to equal 1, making the choice_arr access below provably in bounds.
	 */
	return choice_arr[loop_ctx.j];
}
91 
/* Deliberately never terminates; the verifier must detect the infinite loop. */
static int loop_detection_cb(__u32 idx, struct num_context *ctx)
{
	for (;;) {}
	return 0;
}
97 
SEC("?raw_tp")
__failure __msg("infinite loop detected")
int loop_detection(void *unused)
{
	struct num_context loop_ctx = { .i = 0 };

	/* The callback spins forever; verification must fail as annotated. */
	bpf_loop(100, loop_detection_cb, &loop_ctx, 0);
	return 0;
}
107 
/* Steps ctx->i through the sequence 0 -> 1 -> 32. Two invocations suffice to
 * reach i == 32, which is out of bounds for the 2-byte choice_arr, so any
 * callback-driven helper that may run its callback more than once must have
 * that state propagated by the verifier.
 */
static __always_inline __u64 oob_state_machine(struct num_context *ctx)
{
	switch (ctx->i) {
	case 0:
		ctx->i = 1;
		break;
	case 1:
		ctx->i = 32;
		break;
	}
	return 0;
}
120 
/* bpf_for_each_map_elem() callback: just advances the shared state machine. */
static __u64 for_each_map_elem_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
{
	return oob_state_machine(data);
}
125 
SEC("?raw_tp")
__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
int unsafe_for_each_map_elem(void *unused)
{
	struct num_context loop_ctx = { .i = 0 };

	/* The callback may run multiple times, driving loop_ctx.i to 32;
	 * indexing the 2-byte choice_arr with it must then be rejected.
	 */
	bpf_for_each_map_elem(&map, for_each_map_elem_cb, &loop_ctx, 0);
	return choice_arr[loop_ctx.i];
}
135 
/* bpf_user_ringbuf_drain() callback: just advances the shared state machine. */
static __u64 ringbuf_drain_cb(struct bpf_dynptr *dynptr, void *data)
{
	return oob_state_machine(data);
}
140 
SEC("?raw_tp")
__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
int unsafe_ringbuf_drain(void *unused)
{
	struct num_context loop_ctx = { .i = 0 };

	/* Same shape as unsafe_for_each_map_elem: repeated callback runs can
	 * push loop_ctx.i to 32, out of bounds for the 2-byte choice_arr.
	 */
	bpf_user_ringbuf_drain(&ringbuf, ringbuf_drain_cb, &loop_ctx, 0);
	return choice_arr[loop_ctx.i];
}
150 
/* bpf_find_vma() callback: just advances the shared state machine. */
static __u64 find_vma_cb(struct task_struct *task, struct vm_area_struct *vma, void *data)
{
	return oob_state_machine(data);
}
155 
SEC("?raw_tp")
__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
int unsafe_find_vma(void *unused)
{
	struct task_struct *task = bpf_get_current_task_btf();
	struct num_context loop_ctx = { .i = 0 };

	/* Callback runs can drive loop_ctx.i to 32; the out-of-bounds
	 * choice_arr access must be rejected as annotated above.
	 */
	bpf_find_vma(task, 0, find_vma_cb, &loop_ctx, 0);
	return choice_arr[loop_ctx.i];
}
166 
/* Counts callback invocations in ctx->i. */
static int iter_limit_cb(__u32 idx, struct num_context *ctx)
{
	ctx->i++;
	return 0;
}
172 
SEC("?raw_tp")
__success
int bpf_loop_iter_limit_ok(void *unused)
{
	struct num_context ctx = { .i = 0 };

	/* One iteration: ctx.i ends in {0, 1}, both valid choice_arr
	 * indices, so verification must succeed.
	 */
	bpf_loop(1, iter_limit_cb, &ctx, 0);
	return choice_arr[ctx.i];
}
182 
SEC("?raw_tp")
__failure __msg("invalid access to map value, value_size=2 off=2 size=1")
int bpf_loop_iter_limit_overflow(void *unused)
{
	struct num_context ctx = { .i = 0 };

	/* Two iterations can make ctx.i == 2, one past the end of the
	 * 2-byte choice_arr (off=2 in the expected message).
	 */
	bpf_loop(2, iter_limit_cb, &ctx, 0);
	return choice_arr[ctx.i];
}
192 
/* Marks a visit in the hundreds decimal digit of ctx->i. */
static int iter_limit_level2a_cb(__u32 idx, struct num_context *ctx)
{
	ctx->i += 100;
	return 0;
}
198 
/* Marks a visit in the tens decimal digit of ctx->i. */
static int iter_limit_level2b_cb(__u32 idx, struct num_context *ctx)
{
	ctx->i += 10;
	return 0;
}
204 
/* Marks a visit in the units digit of ctx->i, then runs two nested
 * single-iteration bpf_loop()s that mark the tens and hundreds digits.
 */
static int iter_limit_level1_cb(__u32 idx, struct num_context *ctx)
{
	ctx->i += 1;
	bpf_loop(1, iter_limit_level2a_cb, ctx, 0);
	bpf_loop(1, iter_limit_level2b_cb, ctx, 0);
	return 0;
}
212 
/* Check that path visiting every callback function once had been
 * reached by verifier. Variables 'ctx{1,2}i' below serve as flags,
 * with each decimal digit corresponding to a callback visit marker.
 */
SEC("socket")
__success __retval(111111)
int bpf_loop_iter_limit_nested(void *unused)
{
	struct num_context ctx1 = { .i = 0 };
	struct num_context ctx2 = { .i = 0 };
	__u64 a, b, c;

	bpf_loop(1, iter_limit_level1_cb, &ctx1, 0);
	bpf_loop(1, iter_limit_level1_cb, &ctx2, 0);
	a = ctx1.i;
	b = ctx2.i;
	/* Force 'ctx1.i' and 'ctx2.i' precise. */
	c = choice_arr[(a + b) % 2];
	/* This makes 'c' zero, but neither clang nor verifier know it. */
	c /= 10;
	/* Make sure that verifier does not visit 'impossible' states:
	 * enumerate all possible callback visit masks. A div-by-zero on any
	 * other (a, b) combination would make verification fail, so
	 * __success also asserts no bogus state is explored.
	 */
	if (a != 0 && a != 1 && a != 11 && a != 101 && a != 111 &&
	    b != 0 && b != 1 && b != 11 && b != 101 && b != 111)
		asm volatile ("r0 /= 0;" ::: "r0");
	/* At runtime a == b == 111 and c == 0, matching __retval(111111). */
	return 1000 * a + b + c;
}
241 
/* Three u64 fields at offsets 0, 8 and 16; the naked callback below writes
 * them through raw offsets, so the layout must stay exactly this.
 */
struct iter_limit_bug_ctx {
	__u64 a;
	__u64 b;
	__u64 c;
};
247 
/* bpf_loop() callback, written as naked asm so the branch layout is fixed.
 * On entry r2 is the ctx pointer (saved in r9); offsets 0/8/16 are the
 * a/b/c fields of struct iter_limit_bug_ctx.
 */
static __naked void iter_limit_bug_cb(void)
{
	/* This is the same as C code below, but written
	 * in assembly to control which branches are fall-through.
	 *
	 *   switch (bpf_get_prandom_u32()) {
	 *   case 1:  ctx->a = 42; break;
	 *   case 2:  ctx->b = 42; break;
	 *   default: ctx->c = 42; break;
	 *   }
	 */
	asm volatile (
	"r9 = r2;"
	"call %[bpf_get_prandom_u32];"
	"r1 = r0;"
	"r2 = 42;"
	"r0 = 0;"
	"if r1 == 0x1 goto 1f;"
	"if r1 == 0x2 goto 2f;"
	"*(u64 *)(r9 + 16) = r2;"
	"exit;"
	"1: *(u64 *)(r9 + 0) = r2;"
	"exit;"
	"2: *(u64 *)(r9 + 8) = r2;"
	"exit;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all
	);
}
278 
/* Two callback iterations can set ctx.a and ctx.b to 42 while leaving ctx.c
 * at 7; the guarded division by zero makes verification fail only if the
 * verifier actually explores that state — which __failure expects it to.
 * BPF_F_TEST_STATE_FREQ forces frequent state checkpointing.
 */
SEC("tc")
__failure
__flag(BPF_F_TEST_STATE_FREQ)
int iter_limit_bug(struct __sk_buff *skb)
{
	struct iter_limit_bug_ctx ctx = { 7, 7, 7 };

	bpf_loop(2, iter_limit_bug_cb, &ctx, 0);

	/* This is the same as C code below,
	 * written in assembly to guarantee checks order.
	 *
	 *   if (ctx.a == 42 && ctx.b == 42 && ctx.c == 7)
	 *     asm volatile("r1 /= 0;":::"r1");
	 */
	asm volatile (
	"r1 = *(u64 *)%[ctx_a];"
	"if r1 != 42 goto 1f;"
	"r1 = *(u64 *)%[ctx_b];"
	"if r1 != 42 goto 1f;"
	"r1 = *(u64 *)%[ctx_c];"
	"if r1 != 7 goto 1f;"
	"r1 /= 0;"
	"1:"
	:
	: [ctx_a]"m"(ctx.a),
	  [ctx_b]"m"(ctx.b),
	  [ctx_c]"m"(ctx.c)
	: "r1"
	);
	return 0;
}
311 
/* Mandatory license declaration for BPF programs. */
char _license[] SEC("license") = "GPL";
313