1 // SPDX-License-Identifier: GPL-2.0 2 /* Converted from tools/testing/selftests/bpf/verifier/bounds.c */ 3 4 #include <linux/bpf.h> 5 #include <../../../include/linux/filter.h> 6 #include <bpf/bpf_helpers.h> 7 #include "bpf_misc.h" 8 9 struct { 10 __uint(type, BPF_MAP_TYPE_HASH); 11 __uint(max_entries, 1); 12 __type(key, long long); 13 __type(value, long long); 14 } map_hash_8b SEC(".maps"); 15 16 SEC("socket") 17 __description("subtraction bounds (map value) variant 1") 18 __failure __msg("R0 max value is outside of the allowed memory range") 19 __failure_unpriv 20 __naked void bounds_map_value_variant_1(void) 21 { 22 asm volatile (" \ 23 r1 = 0; \ 24 *(u64*)(r10 - 8) = r1; \ 25 r2 = r10; \ 26 r2 += -8; \ 27 r1 = %[map_hash_8b] ll; \ 28 call %[bpf_map_lookup_elem]; \ 29 if r0 == 0 goto l0_%=; \ 30 r1 = *(u8*)(r0 + 0); \ 31 if r1 > 0xff goto l0_%=; \ 32 r3 = *(u8*)(r0 + 1); \ 33 if r3 > 0xff goto l0_%=; \ 34 r1 -= r3; \ 35 r1 >>= 56; \ 36 r0 += r1; \ 37 r0 = *(u8*)(r0 + 0); \ 38 exit; \ 39 l0_%=: r0 = 0; \ 40 exit; \ 41 " : 42 : __imm(bpf_map_lookup_elem), 43 __imm_addr(map_hash_8b) 44 : __clobber_all); 45 } 46 47 SEC("socket") 48 __description("subtraction bounds (map value) variant 2") 49 __failure 50 __msg("R0 min value is negative, either use unsigned index or do a if (index >=0) check.") 51 __msg_unpriv("R1 has unknown scalar with mixed signed bounds") 52 __naked void bounds_map_value_variant_2(void) 53 { 54 asm volatile (" \ 55 r1 = 0; \ 56 *(u64*)(r10 - 8) = r1; \ 57 r2 = r10; \ 58 r2 += -8; \ 59 r1 = %[map_hash_8b] ll; \ 60 call %[bpf_map_lookup_elem]; \ 61 if r0 == 0 goto l0_%=; \ 62 r1 = *(u8*)(r0 + 0); \ 63 if r1 > 0xff goto l0_%=; \ 64 r3 = *(u8*)(r0 + 1); \ 65 if r3 > 0xff goto l0_%=; \ 66 r1 -= r3; \ 67 r0 += r1; \ 68 r0 = *(u8*)(r0 + 0); \ 69 exit; \ 70 l0_%=: r0 = 0; \ 71 exit; \ 72 " : 73 : __imm(bpf_map_lookup_elem), 74 __imm_addr(map_hash_8b) 75 : __clobber_all); 76 } 77 78 SEC("socket") 79 __description("check subtraction on pointers for unpriv") 80 __success __failure_unpriv __msg_unpriv("R9 pointer -= pointer prohibited") 81 __retval(0) 82 __naked void subtraction_on_pointers_for_unpriv(void) 83 { 84 asm volatile (" \ 85 r0 = 0; \ 86 r1 = %[map_hash_8b] ll; \ 87 r2 = r10; \ 88 r2 += -8; \ 89 r6 = 9; \ 90 *(u64*)(r2 + 0) = r6; \ 91 call %[bpf_map_lookup_elem]; \ 92 r9 = r10; \ 93 r9 -= r0; \ 94 r1 = %[map_hash_8b] ll; \ 95 r2 = r10; \ 96 r2 += -8; \ 97 r6 = 0; \ 98 *(u64*)(r2 + 0) = r6; \ 99 call %[bpf_map_lookup_elem]; \ 100 if r0 != 0 goto l0_%=; \ 101 exit; \ 102 l0_%=: *(u64*)(r0 + 0) = r9; \ 103 r0 = 0; \ 104 exit; \ 105 " : 106 : __imm(bpf_map_lookup_elem), 107 __imm_addr(map_hash_8b) 108 : __clobber_all); 109 } 110 111 SEC("socket") 112 __description("bounds check based on zero-extended MOV") 113 __success __success_unpriv __retval(0) 114 __naked void based_on_zero_extended_mov(void) 115 { 116 asm volatile (" \ 117 r1 = 0; \ 118 *(u64*)(r10 - 8) = r1; \ 119 r2 = r10; \ 120 r2 += -8; \ 121 r1 = %[map_hash_8b] ll; \ 122 call %[bpf_map_lookup_elem]; \ 123 if r0 == 0 goto l0_%=; \ 124 /* r2 = 0x0000'0000'ffff'ffff */ \ 125 w2 = 0xffffffff; \ 126 /* r2 = 0 */ \ 127 r2 >>= 32; \ 128 /* no-op */ \ 129 r0 += r2; \ 130 /* access at offset 0 */ \ 131 r0 = *(u8*)(r0 + 0); \ 132 l0_%=: /* exit */ \ 133 r0 = 0; \ 134 exit; \ 135 " : 136 : __imm(bpf_map_lookup_elem), 137 __imm_addr(map_hash_8b) 138 : __clobber_all); 139 } 140 141 SEC("socket") 142 __description("bounds check based on sign-extended MOV. test1") 143 __failure __msg("map_value pointer and 4294967295") 144 __failure_unpriv 145 __naked void on_sign_extended_mov_test1(void) 146 { 147 asm volatile (" \ 148 r1 = 0; \ 149 *(u64*)(r10 - 8) = r1; \ 150 r2 = r10; \ 151 r2 += -8; \ 152 r1 = %[map_hash_8b] ll; \ 153 call %[bpf_map_lookup_elem]; \ 154 if r0 == 0 goto l0_%=; \ 155 /* r2 = 0xffff'ffff'ffff'ffff */ \ 156 r2 = 0xffffffff; \ 157 /* r2 = 0xffff'ffff */ \ 158 r2 >>= 32; \ 159 /* r0 = <oob pointer> */ \ 160 r0 += r2; \ 161 /* access to OOB pointer */ \ 162 r0 = *(u8*)(r0 + 0); \ 163 l0_%=: /* exit */ \ 164 r0 = 0; \ 165 exit; \ 166 " : 167 : __imm(bpf_map_lookup_elem), 168 __imm_addr(map_hash_8b) 169 : __clobber_all); 170 } 171 172 SEC("socket") 173 __description("bounds check based on sign-extended MOV. test2") 174 __failure __msg("R0 min value is outside of the allowed memory range") 175 __failure_unpriv 176 __naked void on_sign_extended_mov_test2(void) 177 { 178 asm volatile (" \ 179 r1 = 0; \ 180 *(u64*)(r10 - 8) = r1; \ 181 r2 = r10; \ 182 r2 += -8; \ 183 r1 = %[map_hash_8b] ll; \ 184 call %[bpf_map_lookup_elem]; \ 185 if r0 == 0 goto l0_%=; \ 186 /* r2 = 0xffff'ffff'ffff'ffff */ \ 187 r2 = 0xffffffff; \ 188 /* r2 = 0xfff'ffff */ \ 189 r2 >>= 36; \ 190 /* r0 = <oob pointer> */ \ 191 r0 += r2; \ 192 /* access to OOB pointer */ \ 193 r0 = *(u8*)(r0 + 0); \ 194 l0_%=: /* exit */ \ 195 r0 = 0; \ 196 exit; \ 197 " : 198 : __imm(bpf_map_lookup_elem), 199 __imm_addr(map_hash_8b) 200 : __clobber_all); 201 } 202 203 SEC("tc") 204 __description("bounds check based on reg_off + var_off + insn_off. test1") 205 __failure __msg("value_size=8 off=1073741825") 206 __naked void var_off_insn_off_test1(void) 207 { 208 asm volatile (" \ 209 r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ 210 r1 = 0; \ 211 *(u64*)(r10 - 8) = r1; \ 212 r2 = r10; \ 213 r2 += -8; \ 214 r1 = %[map_hash_8b] ll; \ 215 call %[bpf_map_lookup_elem]; \ 216 if r0 == 0 goto l0_%=; \ 217 r6 &= 1; \ 218 r6 += %[__imm_0]; \ 219 r0 += r6; \ 220 r0 += %[__imm_0]; \ 221 l0_%=: r0 = *(u8*)(r0 + 3); \ 222 r0 = 0; \ 223 exit; \ 224 " : 225 : __imm(bpf_map_lookup_elem), 226 __imm_addr(map_hash_8b), 227 __imm_const(__imm_0, (1 << 29) - 1), 228 __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) 229 : __clobber_all); 230 } 231 232 SEC("tc") 233 __description("bounds check based on reg_off + var_off + insn_off. test2") 234 __failure __msg("value 1073741823") 235 __naked void var_off_insn_off_test2(void) 236 { 237 asm volatile (" \ 238 r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ 239 r1 = 0; \ 240 *(u64*)(r10 - 8) = r1; \ 241 r2 = r10; \ 242 r2 += -8; \ 243 r1 = %[map_hash_8b] ll; \ 244 call %[bpf_map_lookup_elem]; \ 245 if r0 == 0 goto l0_%=; \ 246 r6 &= 1; \ 247 r6 += %[__imm_0]; \ 248 r0 += r6; \ 249 r0 += %[__imm_1]; \ 250 l0_%=: r0 = *(u8*)(r0 + 3); \ 251 r0 = 0; \ 252 exit; \ 253 " : 254 : __imm(bpf_map_lookup_elem), 255 __imm_addr(map_hash_8b), 256 __imm_const(__imm_0, (1 << 30) - 1), 257 __imm_const(__imm_1, (1 << 29) - 1), 258 __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) 259 : __clobber_all); 260 } 261 262 SEC("socket") 263 __description("bounds check after truncation of non-boundary-crossing range") 264 __success __success_unpriv __retval(0) 265 __naked void of_non_boundary_crossing_range(void) 266 { 267 asm volatile (" \ 268 r1 = 0; \ 269 *(u64*)(r10 - 8) = r1; \ 270 r2 = r10; \ 271 r2 += -8; \ 272 r1 = %[map_hash_8b] ll; \ 273 call %[bpf_map_lookup_elem]; \ 274 if r0 == 0 goto l0_%=; \ 275 /* r1 = [0x00, 0xff] */ \ 276 r1 = *(u8*)(r0 + 0); \ 277 r2 = 1; \ 278 /* r2 = 0x10'0000'0000 */ \ 279 r2 <<= 36; \ 280 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */ \ 281 r1 += r2; \ 282 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */ \ 283 r1 += 0x7fffffff; \ 284 /* r1 = [0x00, 0xff] */ \ 285 w1 -= 0x7fffffff; \ 286 /* r1 = 0 */ \ 287 r1 >>= 8; \ 288 /* no-op */ \ 289 r0 += r1; \ 290 /* access at offset 0 */ \ 291 r0 = *(u8*)(r0 + 0); \ 292 l0_%=: /* exit */ \ 293 r0 = 0; \ 294 exit; \ 295 " : 296 : __imm(bpf_map_lookup_elem), 297 __imm_addr(map_hash_8b) 298 : __clobber_all); 299 } 300 301 SEC("socket") 302 __description("bounds check after truncation of boundary-crossing range (1)") 303 __failure 304 /* not actually fully unbounded, but the bound is very high */ 305 __msg("value -4294967168 makes map_value pointer be out of bounds") 306 __failure_unpriv 307 __naked void of_boundary_crossing_range_1(void) 308 { 309 asm volatile (" \ 310 r1 = 0; \ 311 *(u64*)(r10 - 8) = r1; \ 312 r2 = r10; \ 313 r2 += -8; \ 314 r1 = %[map_hash_8b] ll; \ 315 call %[bpf_map_lookup_elem]; \ 316 if r0 == 0 goto l0_%=; \ 317 /* r1 = [0x00, 0xff] */ \ 318 r1 = *(u8*)(r0 + 0); \ 319 r1 += %[__imm_0]; \ 320 /* r1 = [0xffff'ff80, 0x1'0000'007f] */ \ 321 r1 += %[__imm_0]; \ 322 /* r1 = [0xffff'ff80, 0xffff'ffff] or \ 323 * [0x0000'0000, 0x0000'007f] \ 324 */ \ 325 w1 += 0; \ 326 r1 -= %[__imm_0]; \ 327 /* r1 = [0x00, 0xff] or \ 328 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]\ 329 */ \ 330 r1 -= %[__imm_0]; \ 331 /* error on OOB pointer computation */ \ 332 r0 += r1; \ 333 /* exit */ \ 334 r0 = 0; \ 335 l0_%=: exit; \ 336 " : 337 : __imm(bpf_map_lookup_elem), 338 __imm_addr(map_hash_8b), 339 __imm_const(__imm_0, 0xffffff80 >> 1) 340 : __clobber_all); 341 } 342 343 SEC("socket") 344 __description("bounds check after truncation of boundary-crossing range (2)") 345 __failure __msg("value -4294967168 makes map_value pointer be out of bounds") 346 __failure_unpriv 347 __naked void of_boundary_crossing_range_2(void) 348 { 349 asm volatile (" \ 350 r1 = 0; \ 351 *(u64*)(r10 - 8) = r1; \ 352 r2 = r10; \ 353 r2 += -8; \ 354 r1 = %[map_hash_8b] ll; \ 355 call %[bpf_map_lookup_elem]; \ 356 if r0 == 0 goto l0_%=; \ 357 /* r1 = [0x00, 0xff] */ \ 358 r1 = *(u8*)(r0 + 0); \ 359 r1 += %[__imm_0]; \ 360 /* r1 = [0xffff'ff80, 0x1'0000'007f] */ \ 361 r1 += %[__imm_0]; \ 362 /* r1 = [0xffff'ff80, 0xffff'ffff] or \ 363 * [0x0000'0000, 0x0000'007f] \ 364 * difference to previous test: truncation via MOV32\ 365 * instead of ALU32. \ 366 */ \ 367 w1 = w1; \ 368 r1 -= %[__imm_0]; \ 369 /* r1 = [0x00, 0xff] or \ 370 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]\ 371 */ \ 372 r1 -= %[__imm_0]; \ 373 /* error on OOB pointer computation */ \ 374 r0 += r1; \ 375 /* exit */ \ 376 r0 = 0; \ 377 l0_%=: exit; \ 378 " : 379 : __imm(bpf_map_lookup_elem), 380 __imm_addr(map_hash_8b), 381 __imm_const(__imm_0, 0xffffff80 >> 1) 382 : __clobber_all); 383 } 384 385 SEC("socket") 386 __description("bounds check after wrapping 32-bit addition") 387 __success __success_unpriv __retval(0) 388 __naked void after_wrapping_32_bit_addition(void) 389 { 390 asm volatile (" \ 391 r1 = 0; \ 392 *(u64*)(r10 - 8) = r1; \ 393 r2 = r10; \ 394 r2 += -8; \ 395 r1 = %[map_hash_8b] ll; \ 396 call %[bpf_map_lookup_elem]; \ 397 if r0 == 0 goto l0_%=; \ 398 /* r1 = 0x7fff'ffff */ \ 399 r1 = 0x7fffffff; \ 400 /* r1 = 0xffff'fffe */ \ 401 r1 += 0x7fffffff; \ 402 /* r1 = 0 */ \ 403 w1 += 2; \ 404 /* no-op */ \ 405 r0 += r1; \ 406 /* access at offset 0 */ \ 407 r0 = *(u8*)(r0 + 0); \ 408 l0_%=: /* exit */ \ 409 r0 = 0; \ 410 exit; \ 411 " : 412 : __imm(bpf_map_lookup_elem), 413 __imm_addr(map_hash_8b) 414 : __clobber_all); 415 } 416 417 SEC("socket") 418 __description("bounds check after shift with oversized count operand") 419 __failure __msg("R0 max value is outside of the allowed memory range") 420 __failure_unpriv 421 __naked void shift_with_oversized_count_operand(void) 422 { 423 asm volatile (" \ 424 r1 = 0; \ 425 *(u64*)(r10 - 8) = r1; \ 426 r2 = r10; \ 427 r2 += -8; \ 428 r1 = %[map_hash_8b] ll; \ 429 call %[bpf_map_lookup_elem]; \ 430 if r0 == 0 goto l0_%=; \ 431 r2 = 32; \ 432 r1 = 1; \ 433 /* r1 = (u32)1 << (u32)32 = ? */ \ 434 w1 <<= w2; \ 435 /* r1 = [0x0000, 0xffff] */ \ 436 r1 &= 0xffff; \ 437 /* computes unknown pointer, potentially OOB */ \ 438 r0 += r1; \ 439 /* potentially OOB access */ \ 440 r0 = *(u8*)(r0 + 0); \ 441 l0_%=: /* exit */ \ 442 r0 = 0; \ 443 exit; \ 444 " : 445 : __imm(bpf_map_lookup_elem), 446 __imm_addr(map_hash_8b) 447 : __clobber_all); 448 } 449 450 SEC("socket") 451 __description("bounds check after right shift of maybe-negative number") 452 __failure __msg("R0 unbounded memory access") 453 __failure_unpriv 454 __naked void shift_of_maybe_negative_number(void) 455 { 456 asm volatile (" \ 457 r1 = 0; \ 458 *(u64*)(r10 - 8) = r1; \ 459 r2 = r10; \ 460 r2 += -8; \ 461 r1 = %[map_hash_8b] ll; \ 462 call %[bpf_map_lookup_elem]; \ 463 if r0 == 0 goto l0_%=; \ 464 /* r1 = [0x00, 0xff] */ \ 465 r1 = *(u8*)(r0 + 0); \ 466 /* r1 = [-0x01, 0xfe] */ \ 467 r1 -= 1; \ 468 /* r1 = 0 or 0xff'ffff'ffff'ffff */ \ 469 r1 >>= 8; \ 470 /* r1 = 0 or 0xffff'ffff'ffff */ \ 471 r1 >>= 8; \ 472 /* computes unknown pointer, potentially OOB */ \ 473 r0 += r1; \ 474 /* potentially OOB access */ \ 475 r0 = *(u8*)(r0 + 0); \ 476 l0_%=: /* exit */ \ 477 r0 = 0; \ 478 exit; \ 479 " : 480 : __imm(bpf_map_lookup_elem), 481 __imm_addr(map_hash_8b) 482 : __clobber_all); 483 } 484 485 SEC("socket") 486 __description("bounds check after 32-bit right shift with 64-bit input") 487 __failure __msg("math between map_value pointer and 4294967294 is not allowed") 488 __failure_unpriv 489 __naked void shift_with_64_bit_input(void) 490 { 491 asm volatile (" \ 492 r1 = 0; \ 493 *(u64*)(r10 - 8) = r1; \ 494 r2 = r10; \ 495 r2 += -8; \ 496 r1 = %[map_hash_8b] ll; \ 497 call %[bpf_map_lookup_elem]; \ 498 if r0 == 0 goto l0_%=; \ 499 r1 = 2; \ 500 /* r1 = 1<<32 */ \ 501 r1 <<= 31; \ 502 /* r1 = 0 (NOT 2!) */ \ 503 w1 >>= 31; \ 504 /* r1 = 0xffff'fffe (NOT 0!) */ \ 505 w1 -= 2; \ 506 /* error on computing OOB pointer */ \ 507 r0 += r1; \ 508 /* exit */ \ 509 r0 = 0; \ 510 l0_%=: exit; \ 511 " : 512 : __imm(bpf_map_lookup_elem), 513 __imm_addr(map_hash_8b) 514 : __clobber_all); 515 } 516 517 SEC("socket") 518 __description("bounds check map access with off+size signed 32bit overflow. test1") 519 __failure __msg("map_value pointer and 2147483646") 520 __failure_unpriv 521 __naked void size_signed_32bit_overflow_test1(void) 522 { 523 asm volatile (" \ 524 r1 = 0; \ 525 *(u64*)(r10 - 8) = r1; \ 526 r2 = r10; \ 527 r2 += -8; \ 528 r1 = %[map_hash_8b] ll; \ 529 call %[bpf_map_lookup_elem]; \ 530 if r0 != 0 goto l0_%=; \ 531 exit; \ 532 l0_%=: r0 += 0x7ffffffe; \ 533 r0 = *(u64*)(r0 + 0); \ 534 goto l1_%=; \ 535 l1_%=: exit; \ 536 " : 537 : __imm(bpf_map_lookup_elem), 538 __imm_addr(map_hash_8b) 539 : __clobber_all); 540 } 541 542 SEC("socket") 543 __description("bounds check map access with off+size signed 32bit overflow. test2") 544 __failure __msg("pointer offset 1073741822") 545 __msg_unpriv("R0 pointer arithmetic of map value goes out of range") 546 __naked void size_signed_32bit_overflow_test2(void) 547 { 548 asm volatile (" \ 549 r1 = 0; \ 550 *(u64*)(r10 - 8) = r1; \ 551 r2 = r10; \ 552 r2 += -8; \ 553 r1 = %[map_hash_8b] ll; \ 554 call %[bpf_map_lookup_elem]; \ 555 if r0 != 0 goto l0_%=; \ 556 exit; \ 557 l0_%=: r0 += 0x1fffffff; \ 558 r0 += 0x1fffffff; \ 559 r0 += 0x1fffffff; \ 560 r0 = *(u64*)(r0 + 0); \ 561 goto l1_%=; \ 562 l1_%=: exit; \ 563 " : 564 : __imm(bpf_map_lookup_elem), 565 __imm_addr(map_hash_8b) 566 : __clobber_all); 567 } 568 569 SEC("socket") 570 __description("bounds check map access with off+size signed 32bit overflow. test3") 571 __failure __msg("pointer offset -1073741822") 572 __msg_unpriv("R0 pointer arithmetic of map value goes out of range") 573 __naked void size_signed_32bit_overflow_test3(void) 574 { 575 asm volatile (" \ 576 r1 = 0; \ 577 *(u64*)(r10 - 8) = r1; \ 578 r2 = r10; \ 579 r2 += -8; \ 580 r1 = %[map_hash_8b] ll; \ 581 call %[bpf_map_lookup_elem]; \ 582 if r0 != 0 goto l0_%=; \ 583 exit; \ 584 l0_%=: r0 -= 0x1fffffff; \ 585 r0 -= 0x1fffffff; \ 586 r0 = *(u64*)(r0 + 2); \ 587 goto l1_%=; \ 588 l1_%=: exit; \ 589 " : 590 : __imm(bpf_map_lookup_elem), 591 __imm_addr(map_hash_8b) 592 : __clobber_all); 593 } 594 595 SEC("socket") 596 __description("bounds check map access with off+size signed 32bit overflow. test4") 597 __failure __msg("map_value pointer and 1000000000000") 598 __failure_unpriv 599 __naked void size_signed_32bit_overflow_test4(void) 600 { 601 asm volatile (" \ 602 r1 = 0; \ 603 *(u64*)(r10 - 8) = r1; \ 604 r2 = r10; \ 605 r2 += -8; \ 606 r1 = %[map_hash_8b] ll; \ 607 call %[bpf_map_lookup_elem]; \ 608 if r0 != 0 goto l0_%=; \ 609 exit; \ 610 l0_%=: r1 = 1000000; \ 611 r1 *= 1000000; \ 612 r0 += r1; \ 613 r0 = *(u64*)(r0 + 2); \ 614 goto l1_%=; \ 615 l1_%=: exit; \ 616 " : 617 : __imm(bpf_map_lookup_elem), 618 __imm_addr(map_hash_8b) 619 : __clobber_all); 620 } 621 622 SEC("socket") 623 __description("bounds check mixed 32bit and 64bit arithmetic. test1") 624 __success __success_unpriv 625 __retval(0) 626 #ifdef SPEC_V1 627 __xlated_unpriv("goto pc+2") 628 __xlated_unpriv("nospec") /* inserted to prevent `R0 invalid mem access 'scalar'` */ 629 __xlated_unpriv("goto pc-1") /* sanitized dead code */ 630 __xlated_unpriv("exit") 631 #endif 632 __naked void _32bit_and_64bit_arithmetic_test1(void) 633 { 634 asm volatile (" \ 635 r0 = 0; \ 636 r1 = -1; \ 637 r1 <<= 32; \ 638 r1 += 1; \ 639 /* r1 = 0xffffFFFF00000001 */ \ 640 if w1 > 1 goto l0_%=; \ 641 /* check ALU64 op keeps 32bit bounds */ \ 642 r1 += 1; \ 643 if w1 > 2 goto l0_%=; \ 644 goto l1_%=; \ 645 l0_%=: /* invalid ldx if bounds are lost above */ \ 646 r0 = *(u64*)(r0 - 1); \ 647 l1_%=: exit; \ 648 " ::: __clobber_all); 649 } 650 651 SEC("socket") 652 __description("bounds check mixed 32bit and 64bit arithmetic. test2") 653 __success __success_unpriv 654 __retval(0) 655 #ifdef SPEC_V1 656 __xlated_unpriv("goto pc+2") 657 __xlated_unpriv("nospec") /* inserted to prevent `R0 invalid mem access 'scalar'` */ 658 __xlated_unpriv("goto pc-1") /* sanitized dead code */ 659 __xlated_unpriv("exit") 660 #endif 661 __naked void _32bit_and_64bit_arithmetic_test2(void) 662 { 663 asm volatile (" \ 664 r0 = 0; \ 665 r1 = -1; \ 666 r1 <<= 32; \ 667 r1 += 1; \ 668 /* r1 = 0xffffFFFF00000001 */ \ 669 r2 = 3; \ 670 /* r1 = 0x2 */ \ 671 w1 += 1; \ 672 /* check ALU32 op zero extends 64bit bounds */ \ 673 if r1 > r2 goto l0_%=; \ 674 goto l1_%=; \ 675 l0_%=: /* invalid ldx if bounds are lost above */ \ 676 r0 = *(u64*)(r0 - 1); \ 677 l1_%=: exit; \ 678 " ::: __clobber_all); 679 } 680 681 SEC("tc") 682 __description("assigning 32bit bounds to 64bit for wA = 0, wB = wA") 683 __success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) 684 __naked void for_wa_0_wb_wa(void) 685 { 686 asm volatile (" \ 687 r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 688 r7 = *(u32*)(r1 + %[__sk_buff_data]); \ 689 w9 = 0; \ 690 w2 = w9; \ 691 r6 = r7; \ 692 r6 += r2; \ 693 r3 = r6; \ 694 r3 += 8; \ 695 if r3 > r8 goto l0_%=; \ 696 r5 = *(u32*)(r6 + 0); \ 697 l0_%=: r0 = 0; \ 698 exit; \ 699 " : 700 : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 701 __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 702 : __clobber_all); 703 } 704 705 SEC("socket") 706 __description("bounds check for reg = 0, reg xor 1") 707 __success __success_unpriv 708 __retval(0) 709 #ifdef SPEC_V1 710 __xlated_unpriv("if r1 != 0x0 goto pc+2") 711 __xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ 712 __xlated_unpriv("goto pc-1") /* sanitized dead code */ 713 __xlated_unpriv("r0 = 0") 714 #endif 715 __naked void reg_0_reg_xor_1(void) 716 { 717 asm volatile (" \ 718 r1 = 0; \ 719 *(u64*)(r10 - 8) = r1; \ 720 r2 = r10; \ 721 r2 += -8; \ 722 r1 = %[map_hash_8b] ll; \ 723 call %[bpf_map_lookup_elem]; \ 724 if r0 != 0 goto l0_%=; \ 725 exit; \ 726 l0_%=: r1 = 0; \ 727 r1 ^= 1; \ 728 if r1 != 0 goto l1_%=; \ 729 r0 = *(u64*)(r0 + 8); \ 730 l1_%=: r0 = 0; \ 731 exit; \ 732 " : 733 : __imm(bpf_map_lookup_elem), 734 __imm_addr(map_hash_8b) 735 : __clobber_all); 736 } 737 738 SEC("socket") 739 __description("bounds check for reg32 = 0, reg32 xor 1") 740 __success __success_unpriv 741 __retval(0) 742 #ifdef SPEC_V1 743 __xlated_unpriv("if w1 != 0x0 goto pc+2") 744 __xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ 745 __xlated_unpriv("goto pc-1") /* sanitized dead code */ 746 __xlated_unpriv("r0 = 0") 747 #endif 748 __naked void reg32_0_reg32_xor_1(void) 749 { 750 asm volatile (" \ 751 r1 = 0; \ 752 *(u64*)(r10 - 8) = r1; \ 753 r2 = r10; \ 754 r2 += -8; \ 755 r1 = %[map_hash_8b] ll; \ 756 call %[bpf_map_lookup_elem]; \ 757 if r0 != 0 goto l0_%=; \ 758 exit; \ 759 l0_%=: w1 = 0; \ 760 w1 ^= 1; \ 761 if w1 != 0 goto l1_%=; \ 762 r0 = *(u64*)(r0 + 8); \ 763 l1_%=: r0 = 0; \ 764 exit; \ 765 " : 766 : __imm(bpf_map_lookup_elem), 767 __imm_addr(map_hash_8b) 768 : __clobber_all); 769 } 770 771 SEC("socket") 772 __description("bounds check for reg = 2, reg xor 3") 773 __success __success_unpriv 774 __retval(0) 775 #ifdef SPEC_V1 776 __xlated_unpriv("if r1 > 0x0 goto pc+2") 777 __xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ 778 __xlated_unpriv("goto pc-1") /* sanitized dead code */ 779 __xlated_unpriv("r0 = 0") 780 #endif 781 __naked void reg_2_reg_xor_3(void) 782 { 783 asm volatile (" \ 784 r1 = 0; \ 785 *(u64*)(r10 - 8) = r1; \ 786 r2 = r10; \ 787 r2 += -8; \ 788 r1 = %[map_hash_8b] ll; \ 789 call %[bpf_map_lookup_elem]; \ 790 if r0 != 0 goto l0_%=; \ 791 exit; \ 792 l0_%=: r1 = 2; \ 793 r1 ^= 3; \ 794 if r1 > 0 goto l1_%=; \ 795 r0 = *(u64*)(r0 + 8); \ 796 l1_%=: r0 = 0; \ 797 exit; \ 798 " : 799 : __imm(bpf_map_lookup_elem), 800 __imm_addr(map_hash_8b) 801 : __clobber_all); 802 } 803 804 SEC("socket") 805 __description("bounds check for reg = any, reg xor 3") 806 __failure __msg("invalid access to map value") 807 __msg_unpriv("invalid access to map value") 808 __naked void reg_any_reg_xor_3(void) 809 { 810 asm volatile (" \ 811 r1 = 0; \ 812 *(u64*)(r10 - 8) = r1; \ 813 r2 = r10; \ 814 r2 += -8; \ 815 r1 = %[map_hash_8b] ll; \ 816 call %[bpf_map_lookup_elem]; \ 817 if r0 != 0 goto l0_%=; \ 818 exit; \ 819 l0_%=: r1 = *(u64*)(r0 + 0); \ 820 r1 ^= 3; \ 821 if r1 != 0 goto l1_%=; \ 822 r0 = *(u64*)(r0 + 8); \ 823 l1_%=: r0 = 0; \ 824 exit; \ 825 " : 826 : __imm(bpf_map_lookup_elem), 827 __imm_addr(map_hash_8b) 828 : __clobber_all); 829 } 830 831 SEC("socket") 832 __description("bounds check for reg32 = any, reg32 xor 3") 833 __failure __msg("invalid access to map value") 834 __msg_unpriv("invalid access to map value") 835 __naked void reg32_any_reg32_xor_3(void) 836 { 837 asm volatile (" \ 838 r1 = 0; \ 839 *(u64*)(r10 - 8) = r1; \ 840 r2 = r10; \ 841 r2 += -8; \ 842 r1 = %[map_hash_8b] ll; \ 843 call %[bpf_map_lookup_elem]; \ 844 if r0 != 0 goto l0_%=; \ 845 exit; \ 846 l0_%=: r1 = *(u64*)(r0 + 0); \ 847 w1 ^= 3; \ 848 if w1 != 0 goto l1_%=; \ 849 r0 = *(u64*)(r0 + 8); \ 850 l1_%=: r0 = 0; \ 851 exit; \ 852 " : 853 : __imm(bpf_map_lookup_elem), 854 __imm_addr(map_hash_8b) 855 : __clobber_all); 856 } 857 858 SEC("socket") 859 __description("bounds check for reg > 0, reg xor 3") 860 __success __success_unpriv 861 __retval(0) 862 #ifdef SPEC_V1 863 __xlated_unpriv("if r1 >= 0x0 goto pc+2") 864 __xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ 865 __xlated_unpriv("goto pc-1") /* sanitized dead code */ 866 __xlated_unpriv("r0 = 0") 867 #endif 868 __naked void reg_0_reg_xor_3(void) 869 { 870 asm volatile (" \ 871 r1 = 0; \ 872 *(u64*)(r10 - 8) = r1; \ 873 r2 = r10; \ 874 r2 += -8; \ 875 r1 = %[map_hash_8b] ll; \ 876 call %[bpf_map_lookup_elem]; \ 877 if r0 != 0 goto l0_%=; \ 878 exit; \ 879 l0_%=: r1 = *(u64*)(r0 + 0); \ 880 if r1 <= 0 goto l1_%=; \ 881 r1 ^= 3; \ 882 if r1 >= 0 goto l1_%=; \ 883 r0 = *(u64*)(r0 + 8); \ 884 l1_%=: r0 = 0; \ 885 exit; \ 886 " : 887 : __imm(bpf_map_lookup_elem), 888 __imm_addr(map_hash_8b) 889 : __clobber_all); 890 } 891 892 SEC("socket") 893 __description("bounds check for reg32 > 0, reg32 xor 3") 894 __success __success_unpriv 895 __retval(0) 896 #ifdef SPEC_V1 897 __xlated_unpriv("if w1 >= 0x0 goto pc+2") 898 __xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ 899 __xlated_unpriv("goto pc-1") /* sanitized dead code */ 900 __xlated_unpriv("r0 = 0") 901 #endif 902 __naked void reg32_0_reg32_xor_3(void) 903 { 904 asm volatile (" \ 905 r1 = 0; \ 906 *(u64*)(r10 - 8) = r1; \ 907 r2 = r10; \ 908 r2 += -8; \ 909 r1 = %[map_hash_8b] ll; \ 910 call %[bpf_map_lookup_elem]; \ 911 if r0 != 0 goto l0_%=; \ 912 exit; \ 913 l0_%=: r1 = *(u64*)(r0 + 0); \ 914 if w1 <= 0 goto l1_%=; \ 915 w1 ^= 3; \ 916 if w1 >= 0 goto l1_%=; \ 917 r0 = *(u64*)(r0 + 8); \ 918 l1_%=: r0 = 0; \ 919 exit; \ 920 " : 921 : __imm(bpf_map_lookup_elem), 922 __imm_addr(map_hash_8b) 923 : __clobber_all); 924 } 925 926 SEC("socket") 927 __description("bounds check for non const xor src dst") 928 __success __log_level(2) 929 __msg("5: (af) r0 ^= r6 ; R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=431,var_off=(0x0; 0x1af))") 930 __naked void non_const_xor_src_dst(void) 931 { 932 asm volatile (" \ 933 call %[bpf_get_prandom_u32]; \ 934 r6 = r0; \ 935 call %[bpf_get_prandom_u32]; \ 936 r6 &= 0xaf; \ 937 r0 &= 0x1a0; \ 938 r0 ^= r6; \ 939 exit; \ 940 " : 941 : __imm(bpf_map_lookup_elem), 942 __imm_addr(map_hash_8b), 943 __imm(bpf_get_prandom_u32) 944 : __clobber_all); 945 } 946 947 SEC("socket") 948 __description("bounds check for non const or src dst") 949 __success __log_level(2) 950 __msg("5: (4f) r0 |= r6 ; R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=431,var_off=(0x0; 0x1af))") 951 __naked void non_const_or_src_dst(void) 952 { 953 asm volatile (" \ 954 call %[bpf_get_prandom_u32]; \ 955 r6 = r0; \ 956 call %[bpf_get_prandom_u32]; \ 957 r6 &= 0xaf; \ 958 r0 &= 0x1a0; \ 959 r0 |= r6; \ 960 exit; \ 961 " : 962 : __imm(bpf_map_lookup_elem), 963 __imm_addr(map_hash_8b), 964 __imm(bpf_get_prandom_u32) 965 : __clobber_all); 966 } 967 968 SEC("socket") 969 __description("bounds check for non const mul regs") 970 __success __log_level(2) 971 __msg("5: (2f) r0 *= r6 ; R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=3825,var_off=(0x0; 0xfff))") 972 __naked void non_const_mul_regs(void) 973 { 974 asm volatile (" \ 975 call %[bpf_get_prandom_u32]; \ 976 r6 = r0; \ 977 call %[bpf_get_prandom_u32]; \ 978 r6 &= 0xff; \ 979 r0 &= 0x0f; \ 980 r0 *= r6; \ 981 exit; \ 982 " : 983 : __imm(bpf_map_lookup_elem), 984 __imm_addr(map_hash_8b), 985 __imm(bpf_get_prandom_u32) 986 : __clobber_all); 987 } 988 989 SEC("socket") 990 __description("bounds checks after 32-bit truncation. test 1") 991 __success __failure_unpriv __msg_unpriv("R0 leaks addr") 992 __retval(0) 993 __naked void _32_bit_truncation_test_1(void) 994 { 995 asm volatile (" \ 996 r1 = 0; \ 997 *(u64*)(r10 - 8) = r1; \ 998 r2 = r10; \ 999 r2 += -8; \ 1000 r1 = %[map_hash_8b] ll; \ 1001 call %[bpf_map_lookup_elem]; \ 1002 if r0 == 0 goto l0_%=; \ 1003 r1 = *(u32*)(r0 + 0); \ 1004 /* This used to reduce the max bound to 0x7fffffff */\ 1005 if r1 == 0 goto l1_%=; \ 1006 if r1 > 0x7fffffff goto l0_%=; \ 1007 l1_%=: r0 = 0; \ 1008 l0_%=: exit; \ 1009 " : 1010 : __imm(bpf_map_lookup_elem), 1011 __imm_addr(map_hash_8b) 1012 : __clobber_all); 1013 } 1014 1015 SEC("socket") 1016 __description("bounds checks after 32-bit truncation. test 2") 1017 __success __failure_unpriv __msg_unpriv("R0 leaks addr") 1018 __retval(0) 1019 __naked void _32_bit_truncation_test_2(void) 1020 { 1021 asm volatile (" \ 1022 r1 = 0; \ 1023 *(u64*)(r10 - 8) = r1; \ 1024 r2 = r10; \ 1025 r2 += -8; \ 1026 r1 = %[map_hash_8b] ll; \ 1027 call %[bpf_map_lookup_elem]; \ 1028 if r0 == 0 goto l0_%=; \ 1029 r1 = *(u32*)(r0 + 0); \ 1030 if r1 s< 1 goto l1_%=; \ 1031 if w1 s< 0 goto l0_%=; \ 1032 l1_%=: r0 = 0; \ 1033 l0_%=: exit; \ 1034 " : 1035 : __imm(bpf_map_lookup_elem), 1036 __imm_addr(map_hash_8b) 1037 : __clobber_all); 1038 } 1039 1040 SEC("xdp") 1041 __description("bound check with JMP_JLT for crossing 64-bit signed boundary") 1042 __success __retval(0) 1043 __naked void crossing_64_bit_signed_boundary_1(void) 1044 { 1045 asm volatile (" \ 1046 r2 = *(u32*)(r1 + %[xdp_md_data]); \ 1047 r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ 1048 r1 = r2; \ 1049 r1 += 1; \ 1050 if r1 > r3 goto l0_%=; \ 1051 r1 = *(u8*)(r2 + 0); \ 1052 r0 = 0x7fffffffffffff10 ll; \ 1053 r1 += r0; \ 1054 r0 = 0x8000000000000000 ll; \ 1055 l1_%=: r0 += 1; \ 1056 /* r1 unsigned range is [0x7fffffffffffff10, 0x800000000000000f] */\ 1057 if r0 < r1 goto l1_%=; \ 1058 l0_%=: r0 = 0; \ 1059 exit; \ 1060 " : 1061 : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 1062 __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) 1063 : __clobber_all); 1064 } 1065 1066 SEC("xdp") 1067 __description("bound check with JMP_JSLT for crossing 64-bit signed boundary") 1068 __success __retval(0) 1069 __flag(BPF_F_TEST_REG_INVARIANTS) 1070 __naked void crossing_64_bit_signed_boundary_2(void) 1071 { 1072 asm volatile (" \ 1073 r2 = *(u32*)(r1 + %[xdp_md_data]); \ 1074 r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ 1075 r1 = r2; \ 1076 r1 += 1; \ 1077 if r1 > r3 goto l0_%=; \ 1078 r1 = *(u8*)(r2 + 0); \ 1079 r0 = 0x7fffffffffffff10 ll; \ 1080 r1 += r0; \ 1081 r2 = 0x8000000000000fff ll; \ 1082 r0 = 0x8000000000000000 ll; \ 1083 l1_%=: r0 += 1; \ 1084 if r0 s> r2 goto l0_%=; \ 1085 /* r1 signed range is [S64_MIN, S64_MAX] */ \ 1086 if r0 s< r1 goto l1_%=; \ 1087 r0 = 1; \ 1088 exit; \ 1089 l0_%=: r0 = 0; \ 1090 exit; \ 1091 " : 1092 : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 1093 __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) 1094 : __clobber_all); 1095 } 1096 1097 SEC("xdp") 1098 __description("bound check for loop upper bound greater than U32_MAX") 1099 __success __retval(0) 1100 __naked void bound_greater_than_u32_max(void) 1101 { 1102 asm volatile (" \ 1103 r2 = *(u32*)(r1 + %[xdp_md_data]); \ 1104 r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ 1105 r1 = r2; \ 1106 r1 += 1; \ 1107 if r1 > r3 goto l0_%=; \ 1108 r1 = *(u8*)(r2 + 0); \ 1109 r0 = 0x100000000 ll; \ 1110 r1 += r0; \ 1111 r0 = 0x100000000 ll; \ 1112 l1_%=: r0 += 1; \ 1113 if r0 < r1 goto l1_%=; \ 1114 l0_%=: r0 = 0; \ 1115 exit; \ 1116 " : 1117 : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 1118 __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) 1119 : __clobber_all); 1120 } 1121 1122 SEC("xdp") 1123 __description("bound check with JMP32_JLT for crossing 32-bit signed boundary") 1124 __success __retval(0) 1125 __naked void crossing_32_bit_signed_boundary_1(void) 1126 { 1127 asm volatile (" \ 1128 r2 = *(u32*)(r1 + %[xdp_md_data]); \ 1129 r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ 1130 r1 = r2; \ 1131 r1 += 1; \ 1132 if r1 > r3 goto l0_%=; \ 1133 r1 = *(u8*)(r2 + 0); \ 1134 w0 = 0x7fffff10; \ 1135 w1 += w0; \ 1136 w0 = 0x80000000; \ 1137 l1_%=: w0 += 1; \ 1138 /* r1 unsigned range is [0, 0x8000000f] */ \ 1139 if w0 < w1 goto l1_%=; \ 1140 l0_%=: r0 = 0; \ 1141 exit; \ 1142 " : 1143 : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 1144 __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) 1145 : __clobber_all); 1146 } 1147 1148 SEC("xdp") 1149 __description("bound check with JMP32_JSLT for crossing 32-bit signed boundary") 1150 __success __retval(0) 1151 __flag(!BPF_F_TEST_REG_INVARIANTS) /* known invariants violation */ 1152 __naked void crossing_32_bit_signed_boundary_2(void) 1153 { 1154 asm volatile (" \ 1155 r2 = *(u32*)(r1 + %[xdp_md_data]); \ 1156 r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ 1157 r1 = r2; \ 1158 r1 += 1; \ 1159 if r1 > r3 goto l0_%=; \ 1160 r1 = *(u8*)(r2 + 0); \ 1161 w0 = 0x7fffff10; \ 1162 w1 += w0; \ 1163 w2 = 0x80000fff; \ 1164 w0 = 0x80000000; \ 1165 l1_%=: w0 += 1; \ 1166 if w0 s> w2 goto l0_%=; \ 1167 /* r1 signed range is [S32_MIN, S32_MAX] */ \ 1168 if w0 s< w1 goto l1_%=; \ 1169 r0 = 1; \ 1170 exit; \ 1171 l0_%=: r0 = 0; \ 1172 exit; \ 1173 " : 1174 : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 1175 __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) 1176 : __clobber_all); 1177 } 1178 1179 SEC("tc") 1180 __description("bounds check with JMP_NE for reg edge") 1181 __success __retval(0) 1182 __naked void reg_not_equal_const(void) 1183 { 1184 asm volatile (" \ 1185 r6 = r1; \ 1186 r1 = 0; \ 1187 *(u64*)(r10 - 8) = r1; \ 1188 call %[bpf_get_prandom_u32]; \ 1189 r4 = r0; \ 1190 r4 &= 7; \ 1191 if r4 != 0 goto l0_%=; \ 1192 r0 = 0; \ 1193 exit; \ 1194 l0_%=: r1 = r6; \ 1195 r2 = 0; \ 1196 r3 = r10; \ 1197 r3 += -8; \ 1198 r5 = 0; \ 1199 /* The 4th argument of bpf_skb_store_bytes is defined as \ 1200 * ARG_CONST_SIZE, so 0 is not allowed. The 'r4 != 0' \ 1201 * is providing us this exclusion of zero from initial \ 1202 * [0, 7] range. \ 1203 */ \ 1204 call %[bpf_skb_store_bytes]; \ 1205 r0 = 0; \ 1206 exit; \ 1207 " : 1208 : __imm(bpf_get_prandom_u32), 1209 __imm(bpf_skb_store_bytes) 1210 : __clobber_all); 1211 } 1212 1213 SEC("tc") 1214 __description("bounds check with JMP_EQ for reg edge") 1215 __success __retval(0) 1216 __naked void reg_equal_const(void) 1217 { 1218 asm volatile (" \ 1219 r6 = r1; \ 1220 r1 = 0; \ 1221 *(u64*)(r10 - 8) = r1; \ 1222 call %[bpf_get_prandom_u32]; \ 1223 r4 = r0; \ 1224 r4 &= 7; \ 1225 if r4 == 0 goto l0_%=; \ 1226 r1 = r6; \ 1227 r2 = 0; \ 1228 r3 = r10; \ 1229 r3 += -8; \ 1230 r5 = 0; \ 1231 /* Just the same as what we do in reg_not_equal_const() */ \ 1232 call %[bpf_skb_store_bytes]; \ 1233 l0_%=: r0 = 0; \ 1234 exit; \ 1235 " : 1236 : __imm(bpf_get_prandom_u32), 1237 __imm(bpf_skb_store_bytes) 1238 : __clobber_all); 1239 } 1240 1241 SEC("tc") 1242 __description("multiply mixed sign bounds. test 1") 1243 __success __log_level(2) 1244 __msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=umin=0x1bc16d5cd4927ee1,smax=umax=0x1bc16d674ec80000,smax32=0x7ffffeff,umax32=0xfffffeff,var_off=(0x1bc16d4000000000; 0x3ffffffeff))") 1245 __naked void mult_mixed0_sign(void) 1246 { 1247 asm volatile ( 1248 "call %[bpf_get_prandom_u32];" 1249 "r6 = r0;" 1250 "call %[bpf_get_prandom_u32];" 1251 "r7 = r0;" 1252 "r6 &= 0xf;" 1253 "r6 -= 1000000000;" 1254 "r7 &= 0xf;" 1255 "r7 -= 2000000000;" 1256 "r6 *= r7;" 1257 "exit" 1258 : 1259 : __imm(bpf_get_prandom_u32), 1260 __imm(bpf_skb_store_bytes) 1261 : __clobber_all); 1262 } 1263 1264 SEC("tc") 1265 __description("multiply mixed sign bounds. test 2") 1266 __success __log_level(2) 1267 __msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=smin32=-100,smax=smax32=200)") 1268 __naked void mult_mixed1_sign(void) 1269 { 1270 asm volatile ( 1271 "call %[bpf_get_prandom_u32];" 1272 "r6 = r0;" 1273 "call %[bpf_get_prandom_u32];" 1274 "r7 = r0;" 1275 "r6 &= 0xf;" 1276 "r6 -= 0xa;" 1277 "r7 &= 0xf;" 1278 "r7 -= 0x14;" 1279 "r6 *= r7;" 1280 "exit" 1281 : 1282 : __imm(bpf_get_prandom_u32), 1283 __imm(bpf_skb_store_bytes) 1284 : __clobber_all); 1285 } 1286 1287 SEC("tc") 1288 __description("multiply negative bounds") 1289 __success __log_level(2) 1290 __msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=umin=smin32=umin32=0x3ff280b0,smax=umax=smax32=umax32=0x3fff0001,var_off=(0x3ff00000; 0xf81ff))") 1291 __naked void mult_sign_bounds(void) 1292 { 1293 asm volatile ( 1294 "r8 = 0x7fff;" 1295 "call %[bpf_get_prandom_u32];" 1296 "r6 = r0;" 1297 "call %[bpf_get_prandom_u32];" 1298 "r7 = r0;" 1299 "r6 &= 0xa;" 1300 "r6 -= r8;" 1301 "r7 &= 0xf;" 1302 "r7 -= r8;" 1303 "r6 *= r7;" 1304 "exit" 1305 : 1306 : __imm(bpf_get_prandom_u32), 1307 __imm(bpf_skb_store_bytes) 1308 : __clobber_all); 1309 } 1310 1311 SEC("tc") 1312 __description("multiply bounds that don't cross signed boundary") 1313 __success __log_level(2) 1314 __msg("r8 *= r6 {{.*}}; R6_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=11,var_off=(0x0; 0xb)) R8_w=scalar(smin=0,smax=umax=0x7b96bb0a94a3a7cd,var_off=(0x0; 0x7fffffffffffffff))") 1315 __naked void mult_no_sign_crossing(void) 1316 { 1317 asm volatile ( 1318 "r6 = 0xb;" 1319 "r8 = 0xb3c3f8c99262687 ll;" 1320 "call %[bpf_get_prandom_u32];" 1321 "r7 = r0;" 1322 "r6 &= r7;" 1323 "r8 *= r6;" 1324 "exit" 1325 : 1326 : __imm(bpf_get_prandom_u32), 1327 __imm(bpf_skb_store_bytes) 1328 : __clobber_all); 1329 } 1330 1331 SEC("tc") 1332 __description("multiplication overflow, result in unbounded reg. test 1") 1333 __success __log_level(2) 1334 __msg("r6 *= r7 {{.*}}; R6_w=scalar()") 1335 __naked void mult_unsign_ovf(void) 1336 { 1337 asm volatile ( 1338 "r8 = 0x7ffffffffff ll;" 1339 "call %[bpf_get_prandom_u32];" 1340 "r6 = r0;" 1341 "call %[bpf_get_prandom_u32];" 1342 "r7 = r0;" 1343 "r6 &= 0x7fffffff;" 1344 "r7 &= r8;" 1345 "r6 *= r7;" 1346 "exit" 1347 : 1348 : __imm(bpf_get_prandom_u32), 1349 __imm(bpf_skb_store_bytes) 1350 : __clobber_all); 1351 } 1352 1353 SEC("tc") 1354 __description("multiplication overflow, result in unbounded reg. test 2") 1355 __success __log_level(2) 1356 __msg("r6 *= r7 {{.*}}; R6_w=scalar()") 1357 __naked void mult_sign_ovf(void) 1358 { 1359 asm volatile ( 1360 "r8 = 0x7ffffffff ll;" 1361 "call %[bpf_get_prandom_u32];" 1362 "r6 = r0;" 1363 "call %[bpf_get_prandom_u32];" 1364 "r7 = r0;" 1365 "r6 &= 0xa;" 1366 "r6 -= r8;" 1367 "r7 &= 0x7fffffff;" 1368 "r6 *= r7;" 1369 "exit" 1370 : 1371 : __imm(bpf_get_prandom_u32), 1372 __imm(bpf_skb_store_bytes) 1373 : __clobber_all); 1374 } 1375 1376 SEC("socket") 1377 __description("64-bit addition, all outcomes overflow") 1378 __success __log_level(2) 1379 __msg("5: (0f) r3 += r3 {{.*}} R3_w=scalar(umin=0x4000000000000000,umax=0xfffffffffffffffe)") 1380 __retval(0) 1381 __naked void add64_full_overflow(void) 1382 { 1383 asm volatile ( 1384 "call %[bpf_get_prandom_u32];" 1385 "r4 = r0;" 1386 "r3 = 0xa000000000000000 ll;" 1387 "r3 |= r4;" 1388 "r3 += r3;" 1389 "r0 = 0;" 1390 "exit" 1391 : 1392 : __imm(bpf_get_prandom_u32) 1393 : __clobber_all); 1394 } 1395 1396 SEC("socket") 1397 __description("64-bit addition, partial overflow, result in unbounded reg") 1398 __success __log_level(2) 1399 __msg("4: (0f) r3 += r3 {{.*}} R3_w=scalar()") 1400 __retval(0) 1401 __naked void add64_partial_overflow(void) 1402 { 1403 asm volatile ( 1404 "call %[bpf_get_prandom_u32];" 1405 "r4 = r0;" 1406 "r3 = 2;" 1407 "r3 |= r4;" 1408 "r3 += r3;" 1409 "r0 = 0;" 1410 "exit" 1411 : 1412 : __imm(bpf_get_prandom_u32) 1413 : __clobber_all); 1414 } 1415 1416 SEC("socket") 1417 __description("32-bit addition overflow, all outcomes overflow") 1418 __success __log_level(2) 1419 __msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=umin=umin32=0x40000000,smax=umax=umax32=0xfffffffe,var_off=(0x0; 0xffffffff))") 1420 __retval(0) 1421 __naked void add32_full_overflow(void) 1422 { 1423 asm volatile ( 1424 "call %[bpf_get_prandom_u32];" 1425 "w4 = w0;" 1426 "w3 = 0xa0000000;" 1427 "w3 |= w4;" 1428 "w3 += w3;" 1429 "r0 = 0;" 1430 "exit" 1431 : 1432 : __imm(bpf_get_prandom_u32) 1433 : __clobber_all); 1434 } 1435 1436 SEC("socket") 1437 __description("32-bit addition, partial overflow, result in unbounded u32 bounds") 1438 __success __log_level(2) 1439 __msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))") 1440 __retval(0) 1441 __naked void add32_partial_overflow(void) 1442 { 1443 asm volatile ( 1444 "call %[bpf_get_prandom_u32];" 1445 "w4 = w0;" 1446 "w3 = 2;" 1447 "w3 |= w4;" 1448 "w3 += w3;" 1449 "r0 = 0;" 1450 "exit" 1451 : 1452 : __imm(bpf_get_prandom_u32) 1453 : __clobber_all); 1454 } 1455 1456 SEC("socket") 1457 __description("64-bit subtraction, all outcomes underflow") 1458 __success __log_level(2) 1459 __msg("6: (1f) r3 -= r1 {{.*}} R3_w=scalar(umin=1,umax=0x8000000000000000)") 1460 __retval(0) 1461 __naked void sub64_full_overflow(void) 1462 { 1463 asm volatile ( 1464 "call %[bpf_get_prandom_u32];" 1465 "r1 = r0;" 1466 "r2 = 0x8000000000000000 ll;" 1467 "r1 |= r2;" 1468 "r3 = 0;" 1469 "r3 -= r1;" 1470 "r0 = 0;" 1471 "exit" 1472 : 1473 : __imm(bpf_get_prandom_u32) 1474 : __clobber_all); 1475 } 1476 1477 SEC("socket") 1478 __description("64-bit subtraction, partial overflow, result in unbounded reg") 1479 __success __log_level(2) 1480 __msg("3: (1f) r3 -= r2 {{.*}} R3_w=scalar()") 1481 __retval(0) 1482 __naked void sub64_partial_overflow(void) 1483 { 1484 asm volatile ( 1485 "call %[bpf_get_prandom_u32];" 1486 "r3 = r0;" 1487 "r2 = 1;" 1488 "r3 -= r2;" 1489 "r0 = 0;" 1490 "exit" 1491 : 1492 : __imm(bpf_get_prandom_u32) 1493 : __clobber_all); 1494 } 1495 1496 SEC("socket") 1497 __description("32-bit subtraction overflow, all outcomes underflow") 1498 __success __log_level(2) 1499 __msg("5: (1c) w3 -= w1 {{.*}} R3_w=scalar(smin=umin=umin32=1,smax=umax=umax32=0x80000000,var_off=(0x0; 0xffffffff))") 1500 __retval(0) 1501 __naked void sub32_full_overflow(void) 1502 { 1503 asm volatile ( 1504 "call %[bpf_get_prandom_u32];" 1505 "w1 = w0;" 1506 "w2 = 0x80000000;" 1507 "w1 |= w2;" 1508 "w3 = 0;" 1509 "w3 -= w1;" 1510 "r0 = 0;" 1511 "exit" 1512 : 1513 : __imm(bpf_get_prandom_u32) 1514 : __clobber_all); 1515 } 1516 1517 SEC("socket") 1518 __description("32-bit subtraction, partial overflow, result in unbounded u32 bounds") 1519 __success __log_level(2) 1520 __msg("3: (1c) w3 -= w2 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))") 1521 __retval(0) 1522 __naked void sub32_partial_overflow(void) 1523 { 1524 asm volatile ( 1525 "call %[bpf_get_prandom_u32];" 1526 "w3 = w0;" 1527 "w2 = 1;" 1528 "w3 -= w2;" 1529 "r0 = 0;" 1530 "exit" 1531 : 1532 : __imm(bpf_get_prandom_u32) 1533 : __clobber_all); 1534 } 1535 1536 SEC("socket") 1537 __description("dead branch on jset, does not result in invariants violation error") 1538 __success __log_level(2) 1539 __retval(0) __flag(BPF_F_TEST_REG_INVARIANTS) 1540 __naked void jset_range_analysis(void) 1541 { 1542 asm volatile (" \ 1543 call %[bpf_get_netns_cookie]; \ 1544 if r0 == 0 goto l0_%=; \ 1545 if r0 & 0xffffffff goto +0; \ 1546 l0_%=: r0 = 0; \ 1547 exit; \ 1548 " : 1549 : __imm(bpf_get_netns_cookie) 1550 : __clobber_all); 1551 } 1552 1553 /* This test covers the bounds deduction on 64bits when the s64 and u64 ranges 1554 * overlap on the negative side. At instruction 7, the ranges look as follows: 1555 * 1556 * 0 umin=0xfffffcf1 umax=0xff..ff6e U64_MAX 1557 * | [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] | 1558 * |----------------------------|------------------------------| 1559 * |xxxxxxxxxx] [xxxxxxxxxxxx| 1560 * 0 smax=0xeffffeee smin=-655 -1 1561 * 1562 * We should therefore deduce the following new bounds: 1563 * 1564 * 0 u64=[0xff..ffd71;0xff..ff6e] U64_MAX 1565 * | [xxx] | 1566 * |----------------------------|------------------------------| 1567 * | [xxx] | 1568 * 0 s64=[-655;-146] -1 1569 * 1570 * Without the deduction cross sign boundary, we end up with an invariant 1571 * violation error. 1572 */ 1573 SEC("socket") 1574 __description("bounds deduction cross sign boundary, negative overlap") 1575 __success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS) 1576 __msg("7: (1f) r0 -= r6 {{.*}} R0=scalar(smin=smin32=-655,smax=smax32=-146,umin=0xfffffffffffffd71,umax=0xffffffffffffff6e,umin32=0xfffffd71,umax32=0xffffff6e,var_off=(0xfffffffffffffc00; 0x3ff))") 1577 __retval(0) 1578 __naked void bounds_deduct_negative_overlap(void) 1579 { 1580 asm volatile(" \ 1581 call %[bpf_get_prandom_u32]; \ 1582 w3 = w0; \ 1583 w6 = (s8)w0; \ 1584 r0 = (s8)r0; \ 1585 if w6 >= 0xf0000000 goto l0_%=; \ 1586 r0 += r6; \ 1587 r6 += 400; \ 1588 r0 -= r6; \ 1589 if r3 < r0 goto l0_%=; \ 1590 l0_%=: r0 = 0; \ 1591 exit; \ 1592 " : 1593 : __imm(bpf_get_prandom_u32) 1594 : __clobber_all); 1595 } 1596 1597 /* This test covers the bounds deduction on 64bits when the s64 and u64 ranges 1598 * overlap on the positive side. At instruction 3, the ranges look as follows: 1599 * 1600 * 0 umin=0 umax=0xffffffffffffff00 U64_MAX 1601 * [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] | 1602 * |----------------------------|------------------------------| 1603 * |xxxxxxxx] [xxxxxxxx| 1604 * 0 smax=127 smin=-128 -1 1605 * 1606 * We should therefore deduce the following new bounds: 1607 * 1608 * 0 u64=[0;127] U64_MAX 1609 * [xxxxxxxx] | 1610 * |----------------------------|------------------------------| 1611 * [xxxxxxxx] | 1612 * 0 s64=[0;127] -1 1613 * 1614 * Without the deduction cross sign boundary, the program is rejected due to 1615 * the frame pointer write. 1616 */ 1617 SEC("socket") 1618 __description("bounds deduction cross sign boundary, positive overlap") 1619 __success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS) 1620 __msg("3: (2d) if r0 > r1 {{.*}} R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=127,var_off=(0x0; 0x7f))") 1621 __retval(0) 1622 __naked void bounds_deduct_positive_overlap(void) 1623 { 1624 asm volatile(" \ 1625 call %[bpf_get_prandom_u32]; \ 1626 r0 = (s8)r0; \ 1627 r1 = 0xffffffffffffff00; \ 1628 if r0 > r1 goto l0_%=; \ 1629 if r0 < 128 goto l0_%=; \ 1630 r10 = 0; \ 1631 l0_%=: r0 = 0; \ 1632 exit; \ 1633 " : 1634 : __imm(bpf_get_prandom_u32) 1635 : __clobber_all); 1636 } 1637 1638 /* This test is the same as above, but the s64 and u64 ranges overlap in two 1639 * places. At instruction 3, the ranges look as follows: 1640 * 1641 * 0 umin=0 umax=0xffffffffffffff80 U64_MAX 1642 * [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] | 1643 * |----------------------------|------------------------------| 1644 * |xxxxxxxx] [xxxxxxxx| 1645 * 0 smax=127 smin=-128 -1 1646 * 1647 * 0xffffffffffffff80 = (u64)-128. We therefore can't deduce anything new and 1648 * the program should fail due to the frame pointer write. 1649 */ 1650 SEC("socket") 1651 __description("bounds deduction cross sign boundary, two overlaps") 1652 __failure __flag(BPF_F_TEST_REG_INVARIANTS) 1653 __msg("3: (2d) if r0 > r1 {{.*}} R0_w=scalar(smin=smin32=-128,smax=smax32=127,umax=0xffffffffffffff80)") 1654 __msg("frame pointer is read only") 1655 __naked void bounds_deduct_two_overlaps(void) 1656 { 1657 asm volatile(" \ 1658 call %[bpf_get_prandom_u32]; \ 1659 r0 = (s8)r0; \ 1660 r1 = 0xffffffffffffff80; \ 1661 if r0 > r1 goto l0_%=; \ 1662 if r0 < 128 goto l0_%=; \ 1663 r10 = 0; \ 1664 l0_%=: r0 = 0; \ 1665 exit; \ 1666 " : 1667 : __imm(bpf_get_prandom_u32) 1668 : __clobber_all); 1669 } 1670 1671 char _license[] SEC("license") = "GPL"; 1672