// SPDX-License-Identifier: GPL-2.0

#include <bpf/bpf.h>
#include <test_progs.h>

#ifdef __x86_64__
/* Create a map suitable for insn_array tests: 4-byte keys mapping to
 * struct bpf_insn_array_value entries.
 */
static int map_create(__u32 map_type, __u32 max_entries)
{
	return bpf_map_create(map_type, "insn_array", sizeof(__u32),
			      sizeof(struct bpf_insn_array_value),
			      max_entries, NULL);
}
15
/* Load an XDP program, optionally passing a verifier fd_array (used to
 * bind insn_array maps to the program at load time).
 */
static int prog_load(struct bpf_insn *insns, __u32 insn_cnt, int *fd_array, __u32 fd_array_cnt)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.fd_array = fd_array,
		.fd_array_cnt = fd_array_cnt,
	);

	return bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, &opts);
}
25
/* Common positive-path helper: create an insn_array map with one slot per
 * instruction, fill it with map_in[] offsets, freeze it, load the program
 * with the map bound via fd_array, then verify every xlated offset matches
 * map_out[].
 */
static void __check_success(struct bpf_insn *insns, __u32 insn_cnt, __u32 *map_in, __u32 *map_out)
{
	struct bpf_insn_array_value val = {};
	int prog_fd = -1, map_fd;
	int key;

	map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, insn_cnt);
	if (!ASSERT_GE(map_fd, 0, "map_create"))
		return;

	/* Point slot i at the caller-provided original offset */
	for (key = 0; key < insn_cnt; key++) {
		val.orig_off = map_in[key];
		if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &key, &val, 0), 0, "bpf_map_update_elem"))
			goto cleanup;
	}

	/* insn_array maps must be frozen before program load */
	if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
		goto cleanup;

	prog_fd = prog_load(insns, insn_cnt, &map_fd, 1);
	if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)"))
		goto cleanup;

	/* The verifier should have rewritten each slot's xlated offset */
	for (key = 0; key < insn_cnt; key++) {
		char buf[64];

		if (!ASSERT_EQ(bpf_map_lookup_elem(map_fd, &key, &val), 0, "bpf_map_lookup_elem"))
			goto cleanup;

		snprintf(buf, sizeof(buf), "val.xlated_off should be equal map_out[%d]", key);
		ASSERT_EQ(val.xlated_off, map_out[key], buf);
	}

cleanup:
	close(prog_fd);
	close(map_fd);
}
62
63 /*
64 * Load a program, which will not be anyhow mangled by the verifier. Add an
65 * insn_array map pointing to every instruction. Check that it hasn't changed
66 * after the program load.
67 */
check_one_to_one_mapping(void)68 static void check_one_to_one_mapping(void)
69 {
70 struct bpf_insn insns[] = {
71 BPF_MOV64_IMM(BPF_REG_0, 4),
72 BPF_MOV64_IMM(BPF_REG_0, 3),
73 BPF_MOV64_IMM(BPF_REG_0, 2),
74 BPF_MOV64_IMM(BPF_REG_0, 1),
75 BPF_MOV64_IMM(BPF_REG_0, 0),
76 BPF_EXIT_INSN(),
77 };
78 __u32 map_in[] = {0, 1, 2, 3, 4, 5};
79 __u32 map_out[] = {0, 1, 2, 3, 4, 5};
80
81 __check_success(insns, ARRAY_SIZE(insns), map_in, map_out);
82 }
83
84 /*
85 * Load a program with two patches (get jiffies, for simplicity). Add an
86 * insn_array map pointing to every instruction. Check how it was changed
87 * after the program load.
88 */
check_simple(void)89 static void check_simple(void)
90 {
91 struct bpf_insn insns[] = {
92 BPF_MOV64_IMM(BPF_REG_0, 2),
93 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
94 BPF_MOV64_IMM(BPF_REG_0, 1),
95 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
96 BPF_MOV64_IMM(BPF_REG_0, 0),
97 BPF_EXIT_INSN(),
98 };
99 __u32 map_in[] = {0, 1, 2, 3, 4, 5};
100 __u32 map_out[] = {0, 1, 4, 5, 8, 9};
101
102 __check_success(insns, ARRAY_SIZE(insns), map_in, map_out);
103 }
104
105 /*
106 * Verifier can delete code in two cases: nops & dead code. From insn
107 * array's point of view, the two cases are the same, so test using
108 * the simplest method: by loading some nops
109 */
check_deletions(void)110 static void check_deletions(void)
111 {
112 struct bpf_insn insns[] = {
113 BPF_MOV64_IMM(BPF_REG_0, 2),
114 BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
115 BPF_MOV64_IMM(BPF_REG_0, 1),
116 BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
117 BPF_MOV64_IMM(BPF_REG_0, 0),
118 BPF_EXIT_INSN(),
119 };
120 __u32 map_in[] = {0, 1, 2, 3, 4, 5};
121 __u32 map_out[] = {0, -1, 1, -1, 2, 3};
122
123 __check_success(insns, ARRAY_SIZE(insns), map_in, map_out);
124 }
125
126 /*
127 * Same test as check_deletions, but also add code which adds instructions
128 */
check_deletions_with_functions(void)129 static void check_deletions_with_functions(void)
130 {
131 struct bpf_insn insns[] = {
132 BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
133 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
134 BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
135 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
136 BPF_MOV64_IMM(BPF_REG_0, 1),
137 BPF_EXIT_INSN(),
138 BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
139 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
140 BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
141 BPF_MOV64_IMM(BPF_REG_0, 2),
142 BPF_EXIT_INSN(),
143 };
144 __u32 map_in[] = { 0, 1, 2, 3, 4, 5, /* func */ 6, 7, 8, 9, 10};
145 __u32 map_out[] = {-1, 0, -1, 3, 4, 5, /* func */ -1, 6, -1, 9, 10};
146
147 __check_success(insns, ARRAY_SIZE(insns), map_in, map_out);
148 }
149
150 /*
151 * Try to load a program with a map which points to outside of the program
152 */
check_out_of_bounds_index(void)153 static void check_out_of_bounds_index(void)
154 {
155 struct bpf_insn insns[] = {
156 BPF_MOV64_IMM(BPF_REG_0, 4),
157 BPF_MOV64_IMM(BPF_REG_0, 3),
158 BPF_MOV64_IMM(BPF_REG_0, 2),
159 BPF_MOV64_IMM(BPF_REG_0, 1),
160 BPF_MOV64_IMM(BPF_REG_0, 0),
161 BPF_EXIT_INSN(),
162 };
163 int prog_fd, map_fd;
164 struct bpf_insn_array_value val = {};
165 int key;
166
167 map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, 1);
168 if (!ASSERT_GE(map_fd, 0, "map_create"))
169 return;
170
171 key = 0;
172 val.orig_off = ARRAY_SIZE(insns); /* too big */
173 if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &key, &val, 0), 0, "bpf_map_update_elem"))
174 goto cleanup;
175
176 if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
177 goto cleanup;
178
179 prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
180 if (!ASSERT_EQ(prog_fd, -EINVAL, "program should have been rejected (prog_fd != -EINVAL)")) {
181 close(prog_fd);
182 goto cleanup;
183 }
184
185 cleanup:
186 close(map_fd);
187 }
188
189 /*
190 * Try to load a program with a map which points to the middle of 16-bit insn
191 */
check_mid_insn_index(void)192 static void check_mid_insn_index(void)
193 {
194 struct bpf_insn insns[] = {
195 BPF_LD_IMM64(BPF_REG_0, 0), /* 2 x 8 */
196 BPF_EXIT_INSN(),
197 };
198 int prog_fd, map_fd;
199 struct bpf_insn_array_value val = {};
200 int key;
201
202 map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, 1);
203 if (!ASSERT_GE(map_fd, 0, "map_create"))
204 return;
205
206 key = 0;
207 val.orig_off = 1; /* middle of 16-byte instruction */
208 if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &key, &val, 0), 0, "bpf_map_update_elem"))
209 goto cleanup;
210
211 if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
212 goto cleanup;
213
214 prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
215 if (!ASSERT_EQ(prog_fd, -EINVAL, "program should have been rejected (prog_fd != -EINVAL)")) {
216 close(prog_fd);
217 goto cleanup;
218 }
219
220 cleanup:
221 close(map_fd);
222 }
223
/* Group the invalid-index rejection checks. */
static void check_incorrect_index(void)
{
	check_out_of_bounds_index();
	check_mid_insn_index();
}
229
set_bpf_jit_harden(char * level)230 static int set_bpf_jit_harden(char *level)
231 {
232 char old_level;
233 int err = -1;
234 int fd = -1;
235
236 fd = open("/proc/sys/net/core/bpf_jit_harden", O_RDWR | O_NONBLOCK);
237 if (fd < 0) {
238 ASSERT_FAIL("open .../bpf_jit_harden returned %d (errno=%d)", fd, errno);
239 return -1;
240 }
241
242 err = read(fd, &old_level, 1);
243 if (err != 1) {
244 ASSERT_FAIL("read from .../bpf_jit_harden returned %d (errno=%d)", err, errno);
245 err = -1;
246 goto end;
247 }
248
249 lseek(fd, 0, SEEK_SET);
250
251 err = write(fd, level, 1);
252 if (err != 1) {
253 ASSERT_FAIL("write to .../bpf_jit_harden returned %d (errno=%d)", err, errno);
254 err = -1;
255 goto end;
256 }
257
258 err = 0;
259 *level = old_level;
260 end:
261 if (fd >= 0)
262 close(fd);
263 return err;
264 }
265
check_blindness(void)266 static void check_blindness(void)
267 {
268 struct bpf_insn insns[] = {
269 BPF_MOV64_IMM(BPF_REG_0, 4),
270 BPF_MOV64_IMM(BPF_REG_0, 3),
271 BPF_MOV64_IMM(BPF_REG_0, 2),
272 BPF_MOV64_IMM(BPF_REG_0, 1),
273 BPF_EXIT_INSN(),
274 };
275 int prog_fd = -1, map_fd;
276 struct bpf_insn_array_value val = {};
277 char bpf_jit_harden = '@'; /* non-exizsting value */
278 int i;
279
280 map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, ARRAY_SIZE(insns));
281 if (!ASSERT_GE(map_fd, 0, "map_create"))
282 return;
283
284 for (i = 0; i < ARRAY_SIZE(insns); i++) {
285 val.orig_off = i;
286 if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &i, &val, 0), 0, "bpf_map_update_elem"))
287 goto cleanup;
288 }
289
290 if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
291 goto cleanup;
292
293 bpf_jit_harden = '2';
294 if (set_bpf_jit_harden(&bpf_jit_harden)) {
295 bpf_jit_harden = '@'; /* open, read or write failed => no write was done */
296 goto cleanup;
297 }
298
299 prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
300 if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)"))
301 goto cleanup;
302
303 for (i = 0; i < ARRAY_SIZE(insns); i++) {
304 char fmt[32];
305
306 if (!ASSERT_EQ(bpf_map_lookup_elem(map_fd, &i, &val), 0, "bpf_map_lookup_elem"))
307 goto cleanup;
308
309 snprintf(fmt, sizeof(fmt), "val should be equal 3*%d", i);
310 ASSERT_EQ(val.xlated_off, i * 3, fmt);
311 }
312
313 cleanup:
314 /* restore the old one */
315 if (bpf_jit_harden != '@')
316 set_bpf_jit_harden(&bpf_jit_harden);
317
318 close(prog_fd);
319 close(map_fd);
320 }
321
322 /* Once map was initialized, it should be frozen */
check_load_unfrozen_map(void)323 static void check_load_unfrozen_map(void)
324 {
325 struct bpf_insn insns[] = {
326 BPF_MOV64_IMM(BPF_REG_0, 0),
327 BPF_EXIT_INSN(),
328 };
329 int prog_fd = -1, map_fd;
330 struct bpf_insn_array_value val = {};
331 int i;
332
333 map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, ARRAY_SIZE(insns));
334 if (!ASSERT_GE(map_fd, 0, "map_create"))
335 return;
336
337 for (i = 0; i < ARRAY_SIZE(insns); i++) {
338 val.orig_off = i;
339 if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &i, &val, 0), 0, "bpf_map_update_elem"))
340 goto cleanup;
341 }
342
343 prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
344 if (!ASSERT_EQ(prog_fd, -EINVAL, "program should have been rejected (prog_fd != -EINVAL)"))
345 goto cleanup;
346
347 /* correctness: now freeze the map, the program should load fine */
348
349 if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
350 goto cleanup;
351
352 prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
353 if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)"))
354 goto cleanup;
355
356 for (i = 0; i < ARRAY_SIZE(insns); i++) {
357 if (!ASSERT_EQ(bpf_map_lookup_elem(map_fd, &i, &val), 0, "bpf_map_lookup_elem"))
358 goto cleanup;
359
360 ASSERT_EQ(val.xlated_off, i, "val should be equal i");
361 }
362
363 cleanup:
364 close(prog_fd);
365 close(map_fd);
366 }
367
368 /* Map can be used only by one BPF program */
check_no_map_reuse(void)369 static void check_no_map_reuse(void)
370 {
371 struct bpf_insn insns[] = {
372 BPF_MOV64_IMM(BPF_REG_0, 0),
373 BPF_EXIT_INSN(),
374 };
375 int prog_fd = -1, map_fd, extra_fd = -1;
376 struct bpf_insn_array_value val = {};
377 int i;
378
379 map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, ARRAY_SIZE(insns));
380 if (!ASSERT_GE(map_fd, 0, "map_create"))
381 return;
382
383 for (i = 0; i < ARRAY_SIZE(insns); i++) {
384 val.orig_off = i;
385 if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &i, &val, 0), 0, "bpf_map_update_elem"))
386 goto cleanup;
387 }
388
389 if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
390 goto cleanup;
391
392 prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
393 if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)"))
394 goto cleanup;
395
396 for (i = 0; i < ARRAY_SIZE(insns); i++) {
397 if (!ASSERT_EQ(bpf_map_lookup_elem(map_fd, &i, &val), 0, "bpf_map_lookup_elem"))
398 goto cleanup;
399
400 ASSERT_EQ(val.xlated_off, i, "val should be equal i");
401 }
402
403 extra_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
404 if (!ASSERT_EQ(extra_fd, -EBUSY, "program should have been rejected (extra_fd != -EBUSY)"))
405 goto cleanup;
406
407 /* correctness: check that prog is still loadable without fd_array */
408 extra_fd = prog_load(insns, ARRAY_SIZE(insns), NULL, 0);
409 if (!ASSERT_GE(extra_fd, 0, "bpf(BPF_PROG_LOAD): expected no error"))
410 goto cleanup;
411
412 cleanup:
413 close(extra_fd);
414 close(prog_fd);
415 close(map_fd);
416 }
417
check_bpf_no_lookup(void)418 static void check_bpf_no_lookup(void)
419 {
420 struct bpf_insn insns[] = {
421 BPF_LD_MAP_FD(BPF_REG_1, 0),
422 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
423 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
424 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
425 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
426 BPF_EXIT_INSN(),
427 };
428 int prog_fd = -1, map_fd;
429
430 map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, 1);
431 if (!ASSERT_GE(map_fd, 0, "map_create"))
432 return;
433
434 insns[0].imm = map_fd;
435
436 if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
437 goto cleanup;
438
439 prog_fd = prog_load(insns, ARRAY_SIZE(insns), NULL, 0);
440 if (!ASSERT_EQ(prog_fd, -EINVAL, "program should have been rejected (prog_fd != -EINVAL)"))
441 goto cleanup;
442
443 /* correctness: check that prog is still loadable with normal map */
444 close(map_fd);
445 map_fd = map_create(BPF_MAP_TYPE_ARRAY, 1);
446 insns[0].imm = map_fd;
447 prog_fd = prog_load(insns, ARRAY_SIZE(insns), NULL, 0);
448 if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)"))
449 goto cleanup;
450
451 cleanup:
452 close(prog_fd);
453 close(map_fd);
454 }
455
/* Group the BPF-side (in-program) operation restrictions. */
static void check_bpf_side(void)
{
	check_bpf_no_lookup();
}
460
__test_bpf_insn_array(void)461 static void __test_bpf_insn_array(void)
462 {
463 /* Test if offsets are adjusted properly */
464
465 if (test__start_subtest("one2one"))
466 check_one_to_one_mapping();
467
468 if (test__start_subtest("simple"))
469 check_simple();
470
471 if (test__start_subtest("deletions"))
472 check_deletions();
473
474 if (test__start_subtest("deletions-with-functions"))
475 check_deletions_with_functions();
476
477 if (test__start_subtest("blindness"))
478 check_blindness();
479
480 /* Check all kinds of operations and related restrictions */
481
482 if (test__start_subtest("incorrect-index"))
483 check_incorrect_index();
484
485 if (test__start_subtest("load-unfrozen-map"))
486 check_load_unfrozen_map();
487
488 if (test__start_subtest("no-map-reuse"))
489 check_no_map_reuse();
490
491 if (test__start_subtest("bpf-side-ops"))
492 check_bpf_side();
493 }
494 #else
__test_bpf_insn_array(void)495 static void __test_bpf_insn_array(void)
496 {
497 test__skip();
498 }
499 #endif
500
/* Public entry point picked up by the test_progs runner. */
void test_bpf_insn_array(void)
{
	__test_bpf_insn_array();
}
505