1 // SPDX-License-Identifier: GPL-2.0 2 /* Converted from tools/testing/selftests/bpf/verifier/bounds.c */ 3 4 #include <linux/bpf.h> 5 #include <bpf/bpf_helpers.h> 6 #include "bpf_misc.h" 7 8 struct { 9 __uint(type, BPF_MAP_TYPE_HASH); 10 __uint(max_entries, 1); 11 __type(key, long long); 12 __type(value, long long); 13 } map_hash_8b SEC(".maps"); 14 15 SEC("socket") 16 __description("subtraction bounds (map value) variant 1") 17 __failure __msg("R0 max value is outside of the allowed memory range") 18 __failure_unpriv 19 __naked void bounds_map_value_variant_1(void) 20 { 21 asm volatile (" \ 22 r1 = 0; \ 23 *(u64*)(r10 - 8) = r1; \ 24 r2 = r10; \ 25 r2 += -8; \ 26 r1 = %[map_hash_8b] ll; \ 27 call %[bpf_map_lookup_elem]; \ 28 if r0 == 0 goto l0_%=; \ 29 r1 = *(u8*)(r0 + 0); \ 30 if r1 > 0xff goto l0_%=; \ 31 r3 = *(u8*)(r0 + 1); \ 32 if r3 > 0xff goto l0_%=; \ 33 r1 -= r3; \ 34 r1 >>= 56; \ 35 r0 += r1; \ 36 r0 = *(u8*)(r0 + 0); \ 37 exit; \ 38 l0_%=: r0 = 0; \ 39 exit; \ 40 " : 41 : __imm(bpf_map_lookup_elem), 42 __imm_addr(map_hash_8b) 43 : __clobber_all); 44 } 45 46 SEC("socket") 47 __description("subtraction bounds (map value) variant 2") 48 __failure 49 __msg("R0 min value is negative, either use unsigned index or do a if (index >=0) check.") 50 __msg_unpriv("R1 has unknown scalar with mixed signed bounds") 51 __naked void bounds_map_value_variant_2(void) 52 { 53 asm volatile (" \ 54 r1 = 0; \ 55 *(u64*)(r10 - 8) = r1; \ 56 r2 = r10; \ 57 r2 += -8; \ 58 r1 = %[map_hash_8b] ll; \ 59 call %[bpf_map_lookup_elem]; \ 60 if r0 == 0 goto l0_%=; \ 61 r1 = *(u8*)(r0 + 0); \ 62 if r1 > 0xff goto l0_%=; \ 63 r3 = *(u8*)(r0 + 1); \ 64 if r3 > 0xff goto l0_%=; \ 65 r1 -= r3; \ 66 r0 += r1; \ 67 r0 = *(u8*)(r0 + 0); \ 68 exit; \ 69 l0_%=: r0 = 0; \ 70 exit; \ 71 " : 72 : __imm(bpf_map_lookup_elem), 73 __imm_addr(map_hash_8b) 74 : __clobber_all); 75 } 76 77 SEC("socket") 78 __description("check subtraction on pointers for unpriv") 79 __success __failure_unpriv __msg_unpriv("R9 pointer -= pointer prohibited") 80 __retval(0) 81 __naked void subtraction_on_pointers_for_unpriv(void) 82 { 83 asm volatile (" \ 84 r0 = 0; \ 85 r1 = %[map_hash_8b] ll; \ 86 r2 = r10; \ 87 r2 += -8; \ 88 r6 = 9; \ 89 *(u64*)(r2 + 0) = r6; \ 90 call %[bpf_map_lookup_elem]; \ 91 r9 = r10; \ 92 r9 -= r0; \ 93 r1 = %[map_hash_8b] ll; \ 94 r2 = r10; \ 95 r2 += -8; \ 96 r6 = 0; \ 97 *(u64*)(r2 + 0) = r6; \ 98 call %[bpf_map_lookup_elem]; \ 99 if r0 != 0 goto l0_%=; \ 100 exit; \ 101 l0_%=: *(u64*)(r0 + 0) = r9; \ 102 r0 = 0; \ 103 exit; \ 104 " : 105 : __imm(bpf_map_lookup_elem), 106 __imm_addr(map_hash_8b) 107 : __clobber_all); 108 } 109 110 SEC("socket") 111 __description("bounds check based on zero-extended MOV") 112 __success __success_unpriv __retval(0) 113 __naked void based_on_zero_extended_mov(void) 114 { 115 asm volatile (" \ 116 r1 = 0; \ 117 *(u64*)(r10 - 8) = r1; \ 118 r2 = r10; \ 119 r2 += -8; \ 120 r1 = %[map_hash_8b] ll; \ 121 call %[bpf_map_lookup_elem]; \ 122 if r0 == 0 goto l0_%=; \ 123 /* r2 = 0x0000'0000'ffff'ffff */ \ 124 w2 = 0xffffffff; \ 125 /* r2 = 0 */ \ 126 r2 >>= 32; \ 127 /* no-op */ \ 128 r0 += r2; \ 129 /* access at offset 0 */ \ 130 r0 = *(u8*)(r0 + 0); \ 131 l0_%=: /* exit */ \ 132 r0 = 0; \ 133 exit; \ 134 " : 135 : __imm(bpf_map_lookup_elem), 136 __imm_addr(map_hash_8b) 137 : __clobber_all); 138 } 139 140 SEC("socket") 141 __description("bounds check based on sign-extended MOV. test1") 142 __failure __msg("map_value pointer and 4294967295") 143 __failure_unpriv 144 __naked void on_sign_extended_mov_test1(void) 145 { 146 asm volatile (" \ 147 r1 = 0; \ 148 *(u64*)(r10 - 8) = r1; \ 149 r2 = r10; \ 150 r2 += -8; \ 151 r1 = %[map_hash_8b] ll; \ 152 call %[bpf_map_lookup_elem]; \ 153 if r0 == 0 goto l0_%=; \ 154 /* r2 = 0xffff'ffff'ffff'ffff */ \ 155 r2 = 0xffffffff; \ 156 /* r2 = 0xffff'ffff */ \ 157 r2 >>= 32; \ 158 /* r0 = <oob pointer> */ \ 159 r0 += r2; \ 160 /* access to OOB pointer */ \ 161 r0 = *(u8*)(r0 + 0); \ 162 l0_%=: /* exit */ \ 163 r0 = 0; \ 164 exit; \ 165 " : 166 : __imm(bpf_map_lookup_elem), 167 __imm_addr(map_hash_8b) 168 : __clobber_all); 169 } 170 171 SEC("socket") 172 __description("bounds check based on sign-extended MOV. test2") 173 __failure __msg("R0 min value is outside of the allowed memory range") 174 __failure_unpriv 175 __naked void on_sign_extended_mov_test2(void) 176 { 177 asm volatile (" \ 178 r1 = 0; \ 179 *(u64*)(r10 - 8) = r1; \ 180 r2 = r10; \ 181 r2 += -8; \ 182 r1 = %[map_hash_8b] ll; \ 183 call %[bpf_map_lookup_elem]; \ 184 if r0 == 0 goto l0_%=; \ 185 /* r2 = 0xffff'ffff'ffff'ffff */ \ 186 r2 = 0xffffffff; \ 187 /* r2 = 0xfff'ffff */ \ 188 r2 >>= 36; \ 189 /* r0 = <oob pointer> */ \ 190 r0 += r2; \ 191 /* access to OOB pointer */ \ 192 r0 = *(u8*)(r0 + 0); \ 193 l0_%=: /* exit */ \ 194 r0 = 0; \ 195 exit; \ 196 " : 197 : __imm(bpf_map_lookup_elem), 198 __imm_addr(map_hash_8b) 199 : __clobber_all); 200 } 201 202 SEC("tc") 203 __description("bounds check based on reg_off + var_off + insn_off. test1") 204 __failure __msg("value_size=8 off=1073741825") 205 __naked void var_off_insn_off_test1(void) 206 { 207 asm volatile (" \ 208 r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ 209 r1 = 0; \ 210 *(u64*)(r10 - 8) = r1; \ 211 r2 = r10; \ 212 r2 += -8; \ 213 r1 = %[map_hash_8b] ll; \ 214 call %[bpf_map_lookup_elem]; \ 215 if r0 == 0 goto l0_%=; \ 216 r6 &= 1; \ 217 r6 += %[__imm_0]; \ 218 r0 += r6; \ 219 r0 += %[__imm_0]; \ 220 l0_%=: r0 = *(u8*)(r0 + 3); \ 221 r0 = 0; \ 222 exit; \ 223 " : 224 : __imm(bpf_map_lookup_elem), 225 __imm_addr(map_hash_8b), 226 __imm_const(__imm_0, (1 << 29) - 1), 227 __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) 228 : __clobber_all); 229 } 230 231 SEC("tc") 232 __description("bounds check based on reg_off + var_off + insn_off. test2") 233 __failure __msg("value 1073741823") 234 __naked void var_off_insn_off_test2(void) 235 { 236 asm volatile (" \ 237 r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ 238 r1 = 0; \ 239 *(u64*)(r10 - 8) = r1; \ 240 r2 = r10; \ 241 r2 += -8; \ 242 r1 = %[map_hash_8b] ll; \ 243 call %[bpf_map_lookup_elem]; \ 244 if r0 == 0 goto l0_%=; \ 245 r6 &= 1; \ 246 r6 += %[__imm_0]; \ 247 r0 += r6; \ 248 r0 += %[__imm_1]; \ 249 l0_%=: r0 = *(u8*)(r0 + 3); \ 250 r0 = 0; \ 251 exit; \ 252 " : 253 : __imm(bpf_map_lookup_elem), 254 __imm_addr(map_hash_8b), 255 __imm_const(__imm_0, (1 << 30) - 1), 256 __imm_const(__imm_1, (1 << 29) - 1), 257 __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) 258 : __clobber_all); 259 } 260 261 SEC("socket") 262 __description("bounds check after truncation of non-boundary-crossing range") 263 __success __success_unpriv __retval(0) 264 __naked void of_non_boundary_crossing_range(void) 265 { 266 asm volatile (" \ 267 r1 = 0; \ 268 *(u64*)(r10 - 8) = r1; \ 269 r2 = r10; \ 270 r2 += -8; \ 271 r1 = %[map_hash_8b] ll; \ 272 call %[bpf_map_lookup_elem]; \ 273 if r0 == 0 goto l0_%=; \ 274 /* r1 = [0x00, 0xff] */ \ 275 r1 = *(u8*)(r0 + 0); \ 276 r2 = 1; \ 277 /* r2 = 0x10'0000'0000 */ \ 278 r2 <<= 36; \ 279 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */ \ 280 r1 += r2; \ 281 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */ \ 282 r1 += 0x7fffffff; \ 283 /* r1 = [0x00, 0xff] */ \ 284 w1 -= 0x7fffffff; \ 285 /* r1 = 0 */ \ 286 r1 >>= 8; \ 287 /* no-op */ \ 288 r0 += r1; \ 289 /* access at offset 0 */ \ 290 r0 = *(u8*)(r0 + 0); \ 291 l0_%=: /* exit */ \ 292 r0 = 0; \ 293 exit; \ 294 " : 295 : __imm(bpf_map_lookup_elem), 296 __imm_addr(map_hash_8b) 297 : __clobber_all); 298 } 299 300 SEC("socket") 301 __description("bounds check after truncation of boundary-crossing range (1)") 302 __failure 303 /* not actually fully unbounded, but the bound is very high */ 304 __msg("value -4294967168 makes map_value pointer be out of bounds") 305 __failure_unpriv 306 __naked void of_boundary_crossing_range_1(void) 307 { 308 asm volatile (" \ 309 r1 = 0; \ 310 *(u64*)(r10 - 8) = r1; \ 311 r2 = r10; \ 312 r2 += -8; \ 313 r1 = %[map_hash_8b] ll; \ 314 call %[bpf_map_lookup_elem]; \ 315 if r0 == 0 goto l0_%=; \ 316 /* r1 = [0x00, 0xff] */ \ 317 r1 = *(u8*)(r0 + 0); \ 318 r1 += %[__imm_0]; \ 319 /* r1 = [0xffff'ff80, 0x1'0000'007f] */ \ 320 r1 += %[__imm_0]; \ 321 /* r1 = [0xffff'ff80, 0xffff'ffff] or \ 322 * [0x0000'0000, 0x0000'007f] \ 323 */ \ 324 w1 += 0; \ 325 r1 -= %[__imm_0]; \ 326 /* r1 = [0x00, 0xff] or \ 327 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]\ 328 */ \ 329 r1 -= %[__imm_0]; \ 330 /* error on OOB pointer computation */ \ 331 r0 += r1; \ 332 /* exit */ \ 333 r0 = 0; \ 334 l0_%=: exit; \ 335 " : 336 : __imm(bpf_map_lookup_elem), 337 __imm_addr(map_hash_8b), 338 __imm_const(__imm_0, 0xffffff80 >> 1) 339 : __clobber_all); 340 } 341 342 SEC("socket") 343 __description("bounds check after truncation of boundary-crossing range (2)") 344 __failure __msg("value -4294967168 makes map_value pointer be out of bounds") 345 __failure_unpriv 346 __naked void of_boundary_crossing_range_2(void) 347 { 348 asm volatile (" \ 349 r1 = 0; \ 350 *(u64*)(r10 - 8) = r1; \ 351 r2 = r10; \ 352 r2 += -8; \ 353 r1 = %[map_hash_8b] ll; \ 354 call %[bpf_map_lookup_elem]; \ 355 if r0 == 0 goto l0_%=; \ 356 /* r1 = [0x00, 0xff] */ \ 357 r1 = *(u8*)(r0 + 0); \ 358 r1 += %[__imm_0]; \ 359 /* r1 = [0xffff'ff80, 0x1'0000'007f] */ \ 360 r1 += %[__imm_0]; \ 361 /* r1 = [0xffff'ff80, 0xffff'ffff] or \ 362 * [0x0000'0000, 0x0000'007f] \ 363 * difference to previous test: truncation via MOV32\ 364 * instead of ALU32. \ 365 */ \ 366 w1 = w1; \ 367 r1 -= %[__imm_0]; \ 368 /* r1 = [0x00, 0xff] or \ 369 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]\ 370 */ \ 371 r1 -= %[__imm_0]; \ 372 /* error on OOB pointer computation */ \ 373 r0 += r1; \ 374 /* exit */ \ 375 r0 = 0; \ 376 l0_%=: exit; \ 377 " : 378 : __imm(bpf_map_lookup_elem), 379 __imm_addr(map_hash_8b), 380 __imm_const(__imm_0, 0xffffff80 >> 1) 381 : __clobber_all); 382 } 383 384 SEC("socket") 385 __description("bounds check after wrapping 32-bit addition") 386 __success __success_unpriv __retval(0) 387 __naked void after_wrapping_32_bit_addition(void) 388 { 389 asm volatile (" \ 390 r1 = 0; \ 391 *(u64*)(r10 - 8) = r1; \ 392 r2 = r10; \ 393 r2 += -8; \ 394 r1 = %[map_hash_8b] ll; \ 395 call %[bpf_map_lookup_elem]; \ 396 if r0 == 0 goto l0_%=; \ 397 /* r1 = 0x7fff'ffff */ \ 398 r1 = 0x7fffffff; \ 399 /* r1 = 0xffff'fffe */ \ 400 r1 += 0x7fffffff; \ 401 /* r1 = 0 */ \ 402 w1 += 2; \ 403 /* no-op */ \ 404 r0 += r1; \ 405 /* access at offset 0 */ \ 406 r0 = *(u8*)(r0 + 0); \ 407 l0_%=: /* exit */ \ 408 r0 = 0; \ 409 exit; \ 410 " : 411 : __imm(bpf_map_lookup_elem), 412 __imm_addr(map_hash_8b) 413 : __clobber_all); 414 } 415 416 SEC("socket") 417 __description("bounds check after shift with oversized count operand") 418 __failure __msg("R0 max value is outside of the allowed memory range") 419 __failure_unpriv 420 __naked void shift_with_oversized_count_operand(void) 421 { 422 asm volatile (" \ 423 r1 = 0; \ 424 *(u64*)(r10 - 8) = r1; \ 425 r2 = r10; \ 426 r2 += -8; \ 427 r1 = %[map_hash_8b] ll; \ 428 call %[bpf_map_lookup_elem]; \ 429 if r0 == 0 goto l0_%=; \ 430 r2 = 32; \ 431 r1 = 1; \ 432 /* r1 = (u32)1 << (u32)32 = ? */ \ 433 w1 <<= w2; \ 434 /* r1 = [0x0000, 0xffff] */ \ 435 r1 &= 0xffff; \ 436 /* computes unknown pointer, potentially OOB */ \ 437 r0 += r1; \ 438 /* potentially OOB access */ \ 439 r0 = *(u8*)(r0 + 0); \ 440 l0_%=: /* exit */ \ 441 r0 = 0; \ 442 exit; \ 443 " : 444 : __imm(bpf_map_lookup_elem), 445 __imm_addr(map_hash_8b) 446 : __clobber_all); 447 } 448 449 SEC("socket") 450 __description("bounds check after right shift of maybe-negative number") 451 __failure __msg("R0 unbounded memory access") 452 __failure_unpriv 453 __naked void shift_of_maybe_negative_number(void) 454 { 455 asm volatile (" \ 456 r1 = 0; \ 457 *(u64*)(r10 - 8) = r1; \ 458 r2 = r10; \ 459 r2 += -8; \ 460 r1 = %[map_hash_8b] ll; \ 461 call %[bpf_map_lookup_elem]; \ 462 if r0 == 0 goto l0_%=; \ 463 /* r1 = [0x00, 0xff] */ \ 464 r1 = *(u8*)(r0 + 0); \ 465 /* r1 = [-0x01, 0xfe] */ \ 466 r1 -= 1; \ 467 /* r1 = 0 or 0xff'ffff'ffff'ffff */ \ 468 r1 >>= 8; \ 469 /* r1 = 0 or 0xffff'ffff'ffff */ \ 470 r1 >>= 8; \ 471 /* computes unknown pointer, potentially OOB */ \ 472 r0 += r1; \ 473 /* potentially OOB access */ \ 474 r0 = *(u8*)(r0 + 0); \ 475 l0_%=: /* exit */ \ 476 r0 = 0; \ 477 exit; \ 478 " : 479 : __imm(bpf_map_lookup_elem), 480 __imm_addr(map_hash_8b) 481 : __clobber_all); 482 } 483 484 SEC("socket") 485 __description("bounds check after 32-bit right shift with 64-bit input") 486 __failure __msg("math between map_value pointer and 4294967294 is not allowed") 487 __failure_unpriv 488 __naked void shift_with_64_bit_input(void) 489 { 490 asm volatile (" \ 491 r1 = 0; \ 492 *(u64*)(r10 - 8) = r1; \ 493 r2 = r10; \ 494 r2 += -8; \ 495 r1 = %[map_hash_8b] ll; \ 496 call %[bpf_map_lookup_elem]; \ 497 if r0 == 0 goto l0_%=; \ 498 r1 = 2; \ 499 /* r1 = 1<<32 */ \ 500 r1 <<= 31; \ 501 /* r1 = 0 (NOT 2!) */ \ 502 w1 >>= 31; \ 503 /* r1 = 0xffff'fffe (NOT 0!) */ \ 504 w1 -= 2; \ 505 /* error on computing OOB pointer */ \ 506 r0 += r1; \ 507 /* exit */ \ 508 r0 = 0; \ 509 l0_%=: exit; \ 510 " : 511 : __imm(bpf_map_lookup_elem), 512 __imm_addr(map_hash_8b) 513 : __clobber_all); 514 } 515 516 SEC("socket") 517 __description("bounds check map access with off+size signed 32bit overflow. test1") 518 __failure __msg("map_value pointer and 2147483646") 519 __failure_unpriv 520 __naked void size_signed_32bit_overflow_test1(void) 521 { 522 asm volatile (" \ 523 r1 = 0; \ 524 *(u64*)(r10 - 8) = r1; \ 525 r2 = r10; \ 526 r2 += -8; \ 527 r1 = %[map_hash_8b] ll; \ 528 call %[bpf_map_lookup_elem]; \ 529 if r0 != 0 goto l0_%=; \ 530 exit; \ 531 l0_%=: r0 += 0x7ffffffe; \ 532 r0 = *(u64*)(r0 + 0); \ 533 goto l1_%=; \ 534 l1_%=: exit; \ 535 " : 536 : __imm(bpf_map_lookup_elem), 537 __imm_addr(map_hash_8b) 538 : __clobber_all); 539 } 540 541 SEC("socket") 542 __description("bounds check map access with off+size signed 32bit overflow. test2") 543 __failure __msg("pointer offset 1073741822") 544 __msg_unpriv("R0 pointer arithmetic of map value goes out of range") 545 __naked void size_signed_32bit_overflow_test2(void) 546 { 547 asm volatile (" \ 548 r1 = 0; \ 549 *(u64*)(r10 - 8) = r1; \ 550 r2 = r10; \ 551 r2 += -8; \ 552 r1 = %[map_hash_8b] ll; \ 553 call %[bpf_map_lookup_elem]; \ 554 if r0 != 0 goto l0_%=; \ 555 exit; \ 556 l0_%=: r0 += 0x1fffffff; \ 557 r0 += 0x1fffffff; \ 558 r0 += 0x1fffffff; \ 559 r0 = *(u64*)(r0 + 0); \ 560 goto l1_%=; \ 561 l1_%=: exit; \ 562 " : 563 : __imm(bpf_map_lookup_elem), 564 __imm_addr(map_hash_8b) 565 : __clobber_all); 566 } 567 568 SEC("socket") 569 __description("bounds check map access with off+size signed 32bit overflow. test3") 570 __failure __msg("pointer offset -1073741822") 571 __msg_unpriv("R0 pointer arithmetic of map value goes out of range") 572 __naked void size_signed_32bit_overflow_test3(void) 573 { 574 asm volatile (" \ 575 r1 = 0; \ 576 *(u64*)(r10 - 8) = r1; \ 577 r2 = r10; \ 578 r2 += -8; \ 579 r1 = %[map_hash_8b] ll; \ 580 call %[bpf_map_lookup_elem]; \ 581 if r0 != 0 goto l0_%=; \ 582 exit; \ 583 l0_%=: r0 -= 0x1fffffff; \ 584 r0 -= 0x1fffffff; \ 585 r0 = *(u64*)(r0 + 2); \ 586 goto l1_%=; \ 587 l1_%=: exit; \ 588 " : 589 : __imm(bpf_map_lookup_elem), 590 __imm_addr(map_hash_8b) 591 : __clobber_all); 592 } 593 594 SEC("socket") 595 __description("bounds check map access with off+size signed 32bit overflow. test4") 596 __failure __msg("map_value pointer and 1000000000000") 597 __failure_unpriv 598 __naked void size_signed_32bit_overflow_test4(void) 599 { 600 asm volatile (" \ 601 r1 = 0; \ 602 *(u64*)(r10 - 8) = r1; \ 603 r2 = r10; \ 604 r2 += -8; \ 605 r1 = %[map_hash_8b] ll; \ 606 call %[bpf_map_lookup_elem]; \ 607 if r0 != 0 goto l0_%=; \ 608 exit; \ 609 l0_%=: r1 = 1000000; \ 610 r1 *= 1000000; \ 611 r0 += r1; \ 612 r0 = *(u64*)(r0 + 2); \ 613 goto l1_%=; \ 614 l1_%=: exit; \ 615 " : 616 : __imm(bpf_map_lookup_elem), 617 __imm_addr(map_hash_8b) 618 : __clobber_all); 619 } 620 621 SEC("socket") 622 __description("bounds check mixed 32bit and 64bit arithmetic. test1") 623 __success __success_unpriv 624 __retval(0) 625 #ifdef SPEC_V1 626 __xlated_unpriv("goto pc+2") 627 __xlated_unpriv("nospec") /* inserted to prevent `R0 invalid mem access 'scalar'` */ 628 __xlated_unpriv("goto pc-1") /* sanitized dead code */ 629 __xlated_unpriv("exit") 630 #endif 631 __naked void _32bit_and_64bit_arithmetic_test1(void) 632 { 633 asm volatile (" \ 634 r0 = 0; \ 635 r1 = -1; \ 636 r1 <<= 32; \ 637 r1 += 1; \ 638 /* r1 = 0xffffFFFF00000001 */ \ 639 if w1 > 1 goto l0_%=; \ 640 /* check ALU64 op keeps 32bit bounds */ \ 641 r1 += 1; \ 642 if w1 > 2 goto l0_%=; \ 643 goto l1_%=; \ 644 l0_%=: /* invalid ldx if bounds are lost above */ \ 645 r0 = *(u64*)(r0 - 1); \ 646 l1_%=: exit; \ 647 " ::: __clobber_all); 648 } 649 650 SEC("socket") 651 __description("bounds check mixed 32bit and 64bit arithmetic. test2") 652 __success __success_unpriv 653 __retval(0) 654 #ifdef SPEC_V1 655 __xlated_unpriv("goto pc+2") 656 __xlated_unpriv("nospec") /* inserted to prevent `R0 invalid mem access 'scalar'` */ 657 __xlated_unpriv("goto pc-1") /* sanitized dead code */ 658 __xlated_unpriv("exit") 659 #endif 660 __naked void _32bit_and_64bit_arithmetic_test2(void) 661 { 662 asm volatile (" \ 663 r0 = 0; \ 664 r1 = -1; \ 665 r1 <<= 32; \ 666 r1 += 1; \ 667 /* r1 = 0xffffFFFF00000001 */ \ 668 r2 = 3; \ 669 /* r1 = 0x2 */ \ 670 w1 += 1; \ 671 /* check ALU32 op zero extends 64bit bounds */ \ 672 if r1 > r2 goto l0_%=; \ 673 goto l1_%=; \ 674 l0_%=: /* invalid ldx if bounds are lost above */ \ 675 r0 = *(u64*)(r0 - 1); \ 676 l1_%=: exit; \ 677 " ::: __clobber_all); 678 } 679 680 SEC("tc") 681 __description("assigning 32bit bounds to 64bit for wA = 0, wB = wA") 682 __success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) 683 __naked void for_wa_0_wb_wa(void) 684 { 685 asm volatile (" \ 686 r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 687 r7 = *(u32*)(r1 + %[__sk_buff_data]); \ 688 w9 = 0; \ 689 w2 = w9; \ 690 r6 = r7; \ 691 r6 += r2; \ 692 r3 = r6; \ 693 r3 += 8; \ 694 if r3 > r8 goto l0_%=; \ 695 r5 = *(u32*)(r6 + 0); \ 696 l0_%=: r0 = 0; \ 697 exit; \ 698 " : 699 : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 700 __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 701 : __clobber_all); 702 } 703 704 SEC("socket") 705 __description("bounds check for reg = 0, reg xor 1") 706 __success __success_unpriv 707 __retval(0) 708 #ifdef SPEC_V1 709 __xlated_unpriv("if r1 != 0x0 goto pc+2") 710 __xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ 711 __xlated_unpriv("goto pc-1") /* sanitized dead code */ 712 __xlated_unpriv("r0 = 0") 713 #endif 714 __naked void reg_0_reg_xor_1(void) 715 { 716 asm volatile (" \ 717 r1 = 0; \ 718 *(u64*)(r10 - 8) = r1; \ 719 r2 = r10; \ 720 r2 += -8; \ 721 r1 = %[map_hash_8b] ll; \ 722 call %[bpf_map_lookup_elem]; \ 723 if r0 != 0 goto l0_%=; \ 724 exit; \ 725 l0_%=: r1 = 0; \ 726 r1 ^= 1; \ 727 if r1 != 0 goto l1_%=; \ 728 r0 = *(u64*)(r0 + 8); \ 729 l1_%=: r0 = 0; \ 730 exit; \ 731 " : 732 : __imm(bpf_map_lookup_elem), 733 __imm_addr(map_hash_8b) 734 : __clobber_all); 735 } 736 737 SEC("socket") 738 __description("bounds check for reg32 = 0, reg32 xor 1") 739 __success __success_unpriv 740 __retval(0) 741 #ifdef SPEC_V1 742 __xlated_unpriv("if w1 != 0x0 goto pc+2") 743 __xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ 744 __xlated_unpriv("goto pc-1") /* sanitized dead code */ 745 __xlated_unpriv("r0 = 0") 746 #endif 747 __naked void reg32_0_reg32_xor_1(void) 748 { 749 asm volatile (" \ 750 r1 = 0; \ 751 *(u64*)(r10 - 8) = r1; \ 752 r2 = r10; \ 753 r2 += -8; \ 754 r1 = %[map_hash_8b] ll; \ 755 call %[bpf_map_lookup_elem]; \ 756 if r0 != 0 goto l0_%=; \ 757 exit; \ 758 l0_%=: w1 = 0; \ 759 w1 ^= 1; \ 760 if w1 != 0 goto l1_%=; \ 761 r0 = *(u64*)(r0 + 8); \ 762 l1_%=: r0 = 0; \ 763 exit; \ 764 " : 765 : __imm(bpf_map_lookup_elem), 766 __imm_addr(map_hash_8b) 767 : __clobber_all); 768 } 769 770 SEC("socket") 771 __description("bounds check for reg = 2, reg xor 3") 772 __success __success_unpriv 773 __retval(0) 774 #ifdef SPEC_V1 775 __xlated_unpriv("if r1 > 0x0 goto pc+2") 776 __xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ 777 __xlated_unpriv("goto pc-1") /* sanitized dead code */ 778 __xlated_unpriv("r0 = 0") 779 #endif 780 __naked void reg_2_reg_xor_3(void) 781 { 782 asm volatile (" \ 783 r1 = 0; \ 784 *(u64*)(r10 - 8) = r1; \ 785 r2 = r10; \ 786 r2 += -8; \ 787 r1 = %[map_hash_8b] ll; \ 788 call %[bpf_map_lookup_elem]; \ 789 if r0 != 0 goto l0_%=; \ 790 exit; \ 791 l0_%=: r1 = 2; \ 792 r1 ^= 3; \ 793 if r1 > 0 goto l1_%=; \ 794 r0 = *(u64*)(r0 + 8); \ 795 l1_%=: r0 = 0; \ 796 exit; \ 797 " : 798 : __imm(bpf_map_lookup_elem), 799 __imm_addr(map_hash_8b) 800 : __clobber_all); 801 } 802 803 SEC("socket") 804 __description("bounds check for reg = any, reg xor 3") 805 __failure __msg("invalid access to map value") 806 __msg_unpriv("invalid access to map value") 807 __naked void reg_any_reg_xor_3(void) 808 { 809 asm volatile (" \ 810 r1 = 0; \ 811 *(u64*)(r10 - 8) = r1; \ 812 r2 = r10; \ 813 r2 += -8; \ 814 r1 = %[map_hash_8b] ll; \ 815 call %[bpf_map_lookup_elem]; \ 816 if r0 != 0 goto l0_%=; \ 817 exit; \ 818 l0_%=: r1 = *(u64*)(r0 + 0); \ 819 r1 ^= 3; \ 820 if r1 != 0 goto l1_%=; \ 821 r0 = *(u64*)(r0 + 8); \ 822 l1_%=: r0 = 0; \ 823 exit; \ 824 " : 825 : __imm(bpf_map_lookup_elem), 826 __imm_addr(map_hash_8b) 827 : __clobber_all); 828 } 829 830 SEC("socket") 831 __description("bounds check for reg32 = any, reg32 xor 3") 832 __failure __msg("invalid access to map value") 833 __msg_unpriv("invalid access to map value") 834 __naked void reg32_any_reg32_xor_3(void) 835 { 836 asm volatile (" \ 837 r1 = 0; \ 838 *(u64*)(r10 - 8) = r1; \ 839 r2 = r10; \ 840 r2 += -8; \ 841 r1 = %[map_hash_8b] ll; \ 842 call %[bpf_map_lookup_elem]; \ 843 if r0 != 0 goto l0_%=; \ 844 exit; \ 845 l0_%=: r1 = *(u64*)(r0 + 0); \ 846 w1 ^= 3; \ 847 if w1 != 0 goto l1_%=; \ 848 r0 = *(u64*)(r0 + 8); \ 849 l1_%=: r0 = 0; \ 850 exit; \ 851 " : 852 : __imm(bpf_map_lookup_elem), 853 __imm_addr(map_hash_8b) 854 : __clobber_all); 855 } 856 857 SEC("socket") 858 __description("bounds check for reg > 0, reg xor 3") 859 __success __success_unpriv 860 __retval(0) 861 #ifdef SPEC_V1 862 __xlated_unpriv("if r1 >= 0x0 goto pc+2") 863 __xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ 864 __xlated_unpriv("goto pc-1") /* sanitized dead code */ 865 __xlated_unpriv("r0 = 0") 866 #endif 867 __naked void reg_0_reg_xor_3(void) 868 { 869 asm volatile (" \ 870 r1 = 0; \ 871 *(u64*)(r10 - 8) = r1; \ 872 r2 = r10; \ 873 r2 += -8; \ 874 r1 = %[map_hash_8b] ll; \ 875 call %[bpf_map_lookup_elem]; \ 876 if r0 != 0 goto l0_%=; \ 877 exit; \ 878 l0_%=: r1 = *(u64*)(r0 + 0); \ 879 if r1 <= 0 goto l1_%=; \ 880 r1 ^= 3; \ 881 if r1 >= 0 goto l1_%=; \ 882 r0 = *(u64*)(r0 + 8); \ 883 l1_%=: r0 = 0; \ 884 exit; \ 885 " : 886 : __imm(bpf_map_lookup_elem), 887 __imm_addr(map_hash_8b) 888 : __clobber_all); 889 } 890 891 SEC("socket") 892 __description("bounds check for reg32 > 0, reg32 xor 3") 893 __success __success_unpriv 894 __retval(0) 895 #ifdef SPEC_V1 896 __xlated_unpriv("if w1 >= 0x0 goto pc+2") 897 __xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ 898 __xlated_unpriv("goto pc-1") /* sanitized dead code */ 899 __xlated_unpriv("r0 = 0") 900 #endif 901 __naked void reg32_0_reg32_xor_3(void) 902 { 903 asm volatile (" \ 904 r1 = 0; \ 905 *(u64*)(r10 - 8) = r1; \ 906 r2 = r10; \ 907 r2 += -8; \ 908 r1 = %[map_hash_8b] ll; \ 909 call %[bpf_map_lookup_elem]; \ 910 if r0 != 0 goto l0_%=; \ 911 exit; \ 912 l0_%=: r1 = *(u64*)(r0 + 0); \ 913 if w1 <= 0 goto l1_%=; \ 914 w1 ^= 3; \ 915 if w1 >= 0 goto l1_%=; \ 916 r0 = *(u64*)(r0 + 8); \ 917 l1_%=: r0 = 0; \ 918 exit; \ 919 " : 920 : __imm(bpf_map_lookup_elem), 921 __imm_addr(map_hash_8b) 922 : __clobber_all); 923 } 924 925 SEC("socket") 926 __description("bounds check for non const xor src dst") 927 __success __log_level(2) 928 __msg("5: (af) r0 ^= r6 ; R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=431,var_off=(0x0; 0x1af))") 929 __naked void non_const_xor_src_dst(void) 930 { 931 asm volatile (" \ 932 call %[bpf_get_prandom_u32]; \ 933 r6 = r0; \ 934 call %[bpf_get_prandom_u32]; \ 935 r6 &= 0xaf; \ 936 r0 &= 0x1a0; \ 937 r0 ^= r6; \ 938 exit; \ 939 " : 940 : __imm(bpf_map_lookup_elem), 941 __imm_addr(map_hash_8b), 942 __imm(bpf_get_prandom_u32) 943 : __clobber_all); 944 } 945 946 SEC("socket") 947 __description("bounds check for non const or src dst") 948 __success __log_level(2) 949 __msg("5: (4f) r0 |= r6 ; R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=431,var_off=(0x0; 0x1af))") 950 __naked void non_const_or_src_dst(void) 951 { 952 asm volatile (" \ 953 call %[bpf_get_prandom_u32]; \ 954 r6 = r0; \ 955 call %[bpf_get_prandom_u32]; \ 956 r6 &= 0xaf; \ 957 r0 &= 0x1a0; \ 958 r0 |= r6; \ 959 exit; \ 960 " : 961 : __imm(bpf_map_lookup_elem), 962 __imm_addr(map_hash_8b), 963 __imm(bpf_get_prandom_u32) 964 : __clobber_all); 965 } 966 967 SEC("socket") 968 __description("bounds check for non const mul regs") 969 __success __log_level(2) 970 __msg("5: (2f) r0 *= r6 ; R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=3825,var_off=(0x0; 0xfff))") 971 __naked void non_const_mul_regs(void) 972 { 973 asm volatile (" \ 974 call %[bpf_get_prandom_u32]; \ 975 r6 = r0; \ 976 call %[bpf_get_prandom_u32]; \ 977 r6 &= 0xff; \ 978 r0 &= 0x0f; \ 979 r0 *= r6; \ 980 exit; \ 981 " : 982 : __imm(bpf_map_lookup_elem), 983 __imm_addr(map_hash_8b), 984 __imm(bpf_get_prandom_u32) 985 : __clobber_all); 986 } 987 988 SEC("socket") 989 __description("bounds checks after 32-bit truncation. test 1") 990 __success __failure_unpriv __msg_unpriv("R0 leaks addr") 991 __retval(0) 992 __naked void _32_bit_truncation_test_1(void) 993 { 994 asm volatile (" \ 995 r1 = 0; \ 996 *(u64*)(r10 - 8) = r1; \ 997 r2 = r10; \ 998 r2 += -8; \ 999 r1 = %[map_hash_8b] ll; \ 1000 call %[bpf_map_lookup_elem]; \ 1001 if r0 == 0 goto l0_%=; \ 1002 r1 = *(u32*)(r0 + 0); \ 1003 /* This used to reduce the max bound to 0x7fffffff */\ 1004 if r1 == 0 goto l1_%=; \ 1005 if r1 > 0x7fffffff goto l0_%=; \ 1006 l1_%=: r0 = 0; \ 1007 l0_%=: exit; \ 1008 " : 1009 : __imm(bpf_map_lookup_elem), 1010 __imm_addr(map_hash_8b) 1011 : __clobber_all); 1012 } 1013 1014 SEC("socket") 1015 __description("bounds checks after 32-bit truncation. test 2") 1016 __success __failure_unpriv __msg_unpriv("R0 leaks addr") 1017 __retval(0) 1018 __naked void _32_bit_truncation_test_2(void) 1019 { 1020 asm volatile (" \ 1021 r1 = 0; \ 1022 *(u64*)(r10 - 8) = r1; \ 1023 r2 = r10; \ 1024 r2 += -8; \ 1025 r1 = %[map_hash_8b] ll; \ 1026 call %[bpf_map_lookup_elem]; \ 1027 if r0 == 0 goto l0_%=; \ 1028 r1 = *(u32*)(r0 + 0); \ 1029 if r1 s< 1 goto l1_%=; \ 1030 if w1 s< 0 goto l0_%=; \ 1031 l1_%=: r0 = 0; \ 1032 l0_%=: exit; \ 1033 " : 1034 : __imm(bpf_map_lookup_elem), 1035 __imm_addr(map_hash_8b) 1036 : __clobber_all); 1037 } 1038 1039 SEC("xdp") 1040 __description("bound check with JMP_JLT for crossing 64-bit signed boundary") 1041 __success __retval(0) 1042 __naked void crossing_64_bit_signed_boundary_1(void) 1043 { 1044 asm volatile (" \ 1045 r2 = *(u32*)(r1 + %[xdp_md_data]); \ 1046 r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ 1047 r1 = r2; \ 1048 r1 += 1; \ 1049 if r1 > r3 goto l0_%=; \ 1050 r1 = *(u8*)(r2 + 0); \ 1051 r0 = 0x7fffffffffffff10 ll; \ 1052 r1 += r0; \ 1053 r0 = 0x8000000000000000 ll; \ 1054 l1_%=: r0 += 1; \ 1055 /* r1 unsigned range is [0x7fffffffffffff10, 0x800000000000000f] */\ 1056 if r0 < r1 goto l1_%=; \ 1057 l0_%=: r0 = 0; \ 1058 exit; \ 1059 " : 1060 : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 1061 __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) 1062 : __clobber_all); 1063 } 1064 1065 SEC("xdp") 1066 __description("bound check with JMP_JSLT for crossing 64-bit signed boundary") 1067 __success __retval(0) 1068 __flag(!BPF_F_TEST_REG_INVARIANTS) /* known invariants violation */ 1069 __naked void crossing_64_bit_signed_boundary_2(void) 1070 { 1071 asm volatile (" \ 1072 r2 = *(u32*)(r1 + %[xdp_md_data]); \ 1073 r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ 1074 r1 = r2; \ 1075 r1 += 1; \ 1076 if r1 > r3 goto l0_%=; \ 1077 r1 = *(u8*)(r2 + 0); \ 1078 r0 = 0x7fffffffffffff10 ll; \ 1079 r1 += r0; \ 1080 r2 = 0x8000000000000fff ll; \ 1081 r0 = 0x8000000000000000 ll; \ 1082 l1_%=: r0 += 1; \ 1083 if r0 s> r2 goto l0_%=; \ 1084 /* r1 signed range is [S64_MIN, S64_MAX] */ \ 1085 if r0 s< r1 goto l1_%=; \ 1086 r0 = 1; \ 1087 exit; \ 1088 l0_%=: r0 = 0; \ 1089 exit; \ 1090 " : 1091 : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 1092 __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) 1093 : __clobber_all); 1094 } 1095 1096 SEC("xdp") 1097 __description("bound check for loop upper bound greater than U32_MAX") 1098 __success __retval(0) 1099 __naked void bound_greater_than_u32_max(void) 1100 { 1101 asm volatile (" \ 1102 r2 = *(u32*)(r1 + %[xdp_md_data]); \ 1103 r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ 1104 r1 = r2; \ 1105 r1 += 1; \ 1106 if r1 > r3 goto l0_%=; \ 1107 r1 = *(u8*)(r2 + 0); \ 1108 r0 = 0x100000000 ll; \ 1109 r1 += r0; \ 1110 r0 = 0x100000000 ll; \ 1111 l1_%=: r0 += 1; \ 1112 if r0 < r1 goto l1_%=; \ 1113 l0_%=: r0 = 0; \ 1114 exit; \ 1115 " : 1116 : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 1117 __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) 1118 : __clobber_all); 1119 } 1120 1121 SEC("xdp") 1122 __description("bound check with JMP32_JLT for crossing 32-bit signed boundary") 1123 __success __retval(0) 1124 __naked void crossing_32_bit_signed_boundary_1(void) 1125 { 1126 asm volatile (" \ 1127 r2 = *(u32*)(r1 + %[xdp_md_data]); \ 1128 r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ 1129 r1 = r2; \ 1130 r1 += 1; \ 1131 if r1 > r3 goto l0_%=; \ 1132 r1 = *(u8*)(r2 + 0); \ 1133 w0 = 0x7fffff10; \ 1134 w1 += w0; \ 1135 w0 = 0x80000000; \ 1136 l1_%=: w0 += 1; \ 1137 /* r1 unsigned range is [0, 0x8000000f] */ \ 1138 if w0 < w1 goto l1_%=; \ 1139 l0_%=: r0 = 0; \ 1140 exit; \ 1141 " : 1142 : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 1143 __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) 1144 : __clobber_all); 1145 } 1146 1147 SEC("xdp") 1148 __description("bound check with JMP32_JSLT for crossing 32-bit signed boundary") 1149 __success __retval(0) 1150 __flag(!BPF_F_TEST_REG_INVARIANTS) /* known invariants violation */ 1151 __naked void crossing_32_bit_signed_boundary_2(void) 1152 { 1153 asm volatile (" \ 1154 r2 = *(u32*)(r1 + %[xdp_md_data]); \ 1155 r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ 1156 r1 = r2; \ 1157 r1 += 1; \ 1158 if r1 > r3 goto l0_%=; \ 1159 r1 = *(u8*)(r2 + 0); \ 1160 w0 = 0x7fffff10; \ 1161 w1 += w0; \ 1162 w2 = 0x80000fff; \ 1163 w0 = 0x80000000; \ 1164 l1_%=: w0 += 1; \ 1165 if w0 s> w2 goto l0_%=; \ 1166 /* r1 signed range is [S32_MIN, S32_MAX] */ \ 1167 if w0 s< w1 goto l1_%=; \ 1168 r0 = 1; \ 1169 exit; \ 1170 l0_%=: r0 = 0; \ 1171 exit; \ 1172 " : 1173 : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 1174 __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) 1175 : __clobber_all); 1176 } 1177 1178 SEC("tc") 1179 __description("bounds check with JMP_NE for reg edge") 1180 __success __retval(0) 1181 __naked void reg_not_equal_const(void) 1182 { 1183 asm volatile (" \ 1184 r6 = r1; \ 1185 r1 = 0; \ 1186 *(u64*)(r10 - 8) = r1; \ 1187 call %[bpf_get_prandom_u32]; \ 1188 r4 = r0; \ 1189 r4 &= 7; \ 1190 if r4 != 0 goto l0_%=; \ 1191 r0 = 0; \ 1192 exit; \ 1193 l0_%=: r1 = r6; \ 1194 r2 = 0; \ 1195 r3 = r10; \ 1196 r3 += -8; \ 1197 r5 = 0; \ 1198 /* The 4th argument of bpf_skb_store_bytes is defined as \ 1199 * ARG_CONST_SIZE, so 0 is not allowed. The 'r4 != 0' \ 1200 * is providing us this exclusion of zero from initial \ 1201 * [0, 7] range. \ 1202 */ \ 1203 call %[bpf_skb_store_bytes]; \ 1204 r0 = 0; \ 1205 exit; \ 1206 " : 1207 : __imm(bpf_get_prandom_u32), 1208 __imm(bpf_skb_store_bytes) 1209 : __clobber_all); 1210 } 1211 1212 SEC("tc") 1213 __description("bounds check with JMP_EQ for reg edge") 1214 __success __retval(0) 1215 __naked void reg_equal_const(void) 1216 { 1217 asm volatile (" \ 1218 r6 = r1; \ 1219 r1 = 0; \ 1220 *(u64*)(r10 - 8) = r1; \ 1221 call %[bpf_get_prandom_u32]; \ 1222 r4 = r0; \ 1223 r4 &= 7; \ 1224 if r4 == 0 goto l0_%=; \ 1225 r1 = r6; \ 1226 r2 = 0; \ 1227 r3 = r10; \ 1228 r3 += -8; \ 1229 r5 = 0; \ 1230 /* Just the same as what we do in reg_not_equal_const() */ \ 1231 call %[bpf_skb_store_bytes]; \ 1232 l0_%=: r0 = 0; \ 1233 exit; \ 1234 " : 1235 : __imm(bpf_get_prandom_u32), 1236 __imm(bpf_skb_store_bytes) 1237 : __clobber_all); 1238 } 1239 1240 SEC("tc") 1241 __description("multiply mixed sign bounds. test 1") 1242 __success __log_level(2) 1243 __msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=umin=0x1bc16d5cd4927ee1,smax=umax=0x1bc16d674ec80000,smax32=0x7ffffeff,umax32=0xfffffeff,var_off=(0x1bc16d4000000000; 0x3ffffffeff))") 1244 __naked void mult_mixed0_sign(void) 1245 { 1246 asm volatile ( 1247 "call %[bpf_get_prandom_u32];" 1248 "r6 = r0;" 1249 "call %[bpf_get_prandom_u32];" 1250 "r7 = r0;" 1251 "r6 &= 0xf;" 1252 "r6 -= 1000000000;" 1253 "r7 &= 0xf;" 1254 "r7 -= 2000000000;" 1255 "r6 *= r7;" 1256 "exit" 1257 : 1258 : __imm(bpf_get_prandom_u32), 1259 __imm(bpf_skb_store_bytes) 1260 : __clobber_all); 1261 } 1262 1263 SEC("tc") 1264 __description("multiply mixed sign bounds. test 2") 1265 __success __log_level(2) 1266 __msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=smin32=-100,smax=smax32=200)") 1267 __naked void mult_mixed1_sign(void) 1268 { 1269 asm volatile ( 1270 "call %[bpf_get_prandom_u32];" 1271 "r6 = r0;" 1272 "call %[bpf_get_prandom_u32];" 1273 "r7 = r0;" 1274 "r6 &= 0xf;" 1275 "r6 -= 0xa;" 1276 "r7 &= 0xf;" 1277 "r7 -= 0x14;" 1278 "r6 *= r7;" 1279 "exit" 1280 : 1281 : __imm(bpf_get_prandom_u32), 1282 __imm(bpf_skb_store_bytes) 1283 : __clobber_all); 1284 } 1285 1286 SEC("tc") 1287 __description("multiply negative bounds") 1288 __success __log_level(2) 1289 __msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=umin=smin32=umin32=0x3ff280b0,smax=umax=smax32=umax32=0x3fff0001,var_off=(0x3ff00000; 0xf81ff))") 1290 __naked void mult_sign_bounds(void) 1291 { 1292 asm volatile ( 1293 "r8 = 0x7fff;" 1294 "call %[bpf_get_prandom_u32];" 1295 "r6 = r0;" 1296 "call %[bpf_get_prandom_u32];" 1297 "r7 = r0;" 1298 "r6 &= 0xa;" 1299 "r6 -= r8;" 1300 "r7 &= 0xf;" 1301 "r7 -= r8;" 1302 "r6 *= r7;" 1303 "exit" 1304 : 1305 : __imm(bpf_get_prandom_u32), 1306 __imm(bpf_skb_store_bytes) 1307 : __clobber_all); 1308 } 1309 1310 SEC("tc") 1311 __description("multiply bounds that don't cross signed boundary") 1312 __success __log_level(2) 1313 __msg("r8 *= r6 {{.*}}; R6_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=11,var_off=(0x0; 0xb)) R8_w=scalar(smin=0,smax=umax=0x7b96bb0a94a3a7cd,var_off=(0x0; 0x7fffffffffffffff))") 1314 __naked void mult_no_sign_crossing(void) 1315 { 1316 asm volatile ( 1317 "r6 = 0xb;" 1318 "r8 = 0xb3c3f8c99262687 ll;" 1319 "call %[bpf_get_prandom_u32];" 1320 "r7 = r0;" 1321 "r6 &= r7;" 1322 "r8 *= r6;" 1323 "exit" 1324 : 1325 : __imm(bpf_get_prandom_u32), 1326 __imm(bpf_skb_store_bytes) 1327 : __clobber_all); 1328 } 1329 1330 SEC("tc") 1331 __description("multiplication overflow, result in unbounded reg. test 1") 1332 __success __log_level(2) 1333 __msg("r6 *= r7 {{.*}}; R6_w=scalar()") 1334 __naked void mult_unsign_ovf(void) 1335 { 1336 asm volatile ( 1337 "r8 = 0x7ffffffffff ll;" 1338 "call %[bpf_get_prandom_u32];" 1339 "r6 = r0;" 1340 "call %[bpf_get_prandom_u32];" 1341 "r7 = r0;" 1342 "r6 &= 0x7fffffff;" 1343 "r7 &= r8;" 1344 "r6 *= r7;" 1345 "exit" 1346 : 1347 : __imm(bpf_get_prandom_u32), 1348 __imm(bpf_skb_store_bytes) 1349 : __clobber_all); 1350 } 1351 1352 SEC("tc") 1353 __description("multiplication overflow, result in unbounded reg. test 2") 1354 __success __log_level(2) 1355 __msg("r6 *= r7 {{.*}}; R6_w=scalar()") 1356 __naked void mult_sign_ovf(void) 1357 { 1358 asm volatile ( 1359 "r8 = 0x7ffffffff ll;" 1360 "call %[bpf_get_prandom_u32];" 1361 "r6 = r0;" 1362 "call %[bpf_get_prandom_u32];" 1363 "r7 = r0;" 1364 "r6 &= 0xa;" 1365 "r6 -= r8;" 1366 "r7 &= 0x7fffffff;" 1367 "r6 *= r7;" 1368 "exit" 1369 : 1370 : __imm(bpf_get_prandom_u32), 1371 __imm(bpf_skb_store_bytes) 1372 : __clobber_all); 1373 } 1374 1375 SEC("socket") 1376 __description("64-bit addition, all outcomes overflow") 1377 __success __log_level(2) 1378 __msg("5: (0f) r3 += r3 {{.*}} R3_w=scalar(umin=0x4000000000000000,umax=0xfffffffffffffffe)") 1379 __retval(0) 1380 __naked void add64_full_overflow(void) 1381 { 1382 asm volatile ( 1383 "call %[bpf_get_prandom_u32];" 1384 "r4 = r0;" 1385 "r3 = 0xa000000000000000 ll;" 1386 "r3 |= r4;" 1387 "r3 += r3;" 1388 "r0 = 0;" 1389 "exit" 1390 : 1391 : __imm(bpf_get_prandom_u32) 1392 : __clobber_all); 1393 } 1394 1395 SEC("socket") 1396 __description("64-bit addition, partial overflow, result in unbounded reg") 1397 __success __log_level(2) 1398 __msg("4: (0f) r3 += r3 {{.*}} R3_w=scalar()") 1399 __retval(0) 1400 __naked void add64_partial_overflow(void) 1401 { 1402 asm volatile ( 1403 "call %[bpf_get_prandom_u32];" 1404 "r4 = r0;" 1405 "r3 = 2;" 1406 "r3 |= r4;" 1407 "r3 += r3;" 1408 "r0 = 0;" 1409 "exit" 1410 : 1411 : __imm(bpf_get_prandom_u32) 1412 : __clobber_all); 1413 } 1414 1415 SEC("socket") 1416 __description("32-bit addition overflow, all outcomes overflow") 1417 __success __log_level(2) 1418 __msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=umin=umin32=0x40000000,smax=umax=umax32=0xfffffffe,var_off=(0x0; 0xffffffff))") 1419 __retval(0) 1420 __naked void add32_full_overflow(void) 1421 { 1422 asm volatile ( 1423 "call %[bpf_get_prandom_u32];" 1424 "w4 = w0;" 1425 "w3 = 0xa0000000;" 1426 "w3 |= w4;" 1427 "w3 += w3;" 1428 "r0 = 0;" 1429 "exit" 1430 : 1431 : __imm(bpf_get_prandom_u32) 1432 : __clobber_all); 1433 } 1434 1435 SEC("socket") 1436 __description("32-bit addition, partial overflow, result in unbounded u32 bounds") 1437 __success __log_level(2) 1438 __msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))") 1439 __retval(0) 1440 __naked void add32_partial_overflow(void) 1441 { 1442 asm volatile ( 1443 "call %[bpf_get_prandom_u32];" 1444 "w4 = w0;" 1445 "w3 = 2;" 1446 "w3 |= w4;" 1447 "w3 += w3;" 1448 "r0 = 0;" 1449 "exit" 1450 : 1451 : __imm(bpf_get_prandom_u32) 1452 : __clobber_all); 1453 } 1454 1455 SEC("socket") 1456 __description("64-bit subtraction, all outcomes underflow") 1457 __success __log_level(2) 1458 __msg("6: (1f) r3 -= r1 {{.*}} R3_w=scalar(umin=1,umax=0x8000000000000000)") 1459 __retval(0) 1460 __naked void sub64_full_overflow(void) 1461 { 1462 asm volatile ( 1463 "call %[bpf_get_prandom_u32];" 1464 "r1 = r0;" 1465 "r2 = 0x8000000000000000 ll;" 1466 "r1 |= r2;" 1467 "r3 = 0;" 1468 "r3 -= r1;" 1469 "r0 = 0;" 1470 "exit" 1471 : 1472 : __imm(bpf_get_prandom_u32) 1473 : __clobber_all); 1474 } 1475 1476 SEC("socket") 1477 __description("64-bit subtraction, partial overflow, result in unbounded reg") 1478 __success __log_level(2) 1479 __msg("3: (1f) r3 -= r2 {{.*}} R3_w=scalar()") 1480 __retval(0) 1481 __naked void sub64_partial_overflow(void) 1482 { 1483 asm volatile ( 1484 "call %[bpf_get_prandom_u32];" 1485 "r3 = r0;" 1486 "r2 = 1;" 1487 "r3 -= r2;" 1488 "r0 = 0;" 1489 "exit" 1490 : 1491 : __imm(bpf_get_prandom_u32) 1492 : __clobber_all); 1493 } 1494 1495 SEC("socket") 1496 __description("32-bit subtraction overflow, all outcomes underflow") 1497 __success __log_level(2) 1498 __msg("5: (1c) w3 -= w1 {{.*}} R3_w=scalar(smin=umin=umin32=1,smax=umax=umax32=0x80000000,var_off=(0x0; 0xffffffff))") 1499 __retval(0) 1500 __naked void sub32_full_overflow(void) 1501 { 1502 asm volatile ( 1503 "call %[bpf_get_prandom_u32];" 1504 "w1 = w0;" 1505 "w2 = 0x80000000;" 1506 "w1 |= w2;" 1507 "w3 = 0;" 1508 "w3 -= w1;" 1509 "r0 = 0;" 1510 "exit" 1511 : 1512 : __imm(bpf_get_prandom_u32) 1513 : __clobber_all); 1514 } 1515 1516 SEC("socket") 1517 __description("32-bit subtraction, partial overflow, result in unbounded u32 bounds") 1518 __success __log_level(2) 1519 __msg("3: (1c) w3 -= w2 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))") 1520 __retval(0) 1521 __naked void sub32_partial_overflow(void) 1522 { 1523 asm volatile ( 1524 "call %[bpf_get_prandom_u32];" 1525 "w3 = w0;" 1526 "w2 = 1;" 1527 "w3 -= w2;" 1528 "r0 = 0;" 1529 "exit" 1530 : 1531 : __imm(bpf_get_prandom_u32) 1532 : __clobber_all); 1533 } 1534 1535 char _license[] SEC("license") = "GPL"; 1536