// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
#include <error.h>
#include <netinet/tcp.h>
#include <sys/epoll.h>

#include "test_progs.h"
#include "test_skmsg_load_helpers.skel.h"
#include "test_sockmap_update.skel.h"
#include "test_sockmap_invalid_update.skel.h"
#include "test_sockmap_skb_verdict_attach.skel.h"
#include "test_sockmap_progs_query.skel.h"
#include "test_sockmap_pass_prog.skel.h"
#include "test_sockmap_drop_prog.skel.h"
#include "test_sockmap_change_tail.skel.h"
#include "bpf_iter_sockmap.skel.h"

#include "sockmap_helpers.h"

#define TCP_REPAIR		19	/* TCP sock is under repair right now */

#define TCP_REPAIR_ON		1
#define TCP_REPAIR_OFF_NO_WP	-1	/* Turn off without window probes */

static int connected_socket_v4(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(80),
		.sin_addr = { inet_addr("127.0.0.1") },
	};
	socklen_t len = sizeof(addr);
	int s, repair, err;

	s = socket(AF_INET, SOCK_STREAM, 0);
	if (!ASSERT_GE(s, 0, "socket"))
		goto error;

	repair = TCP_REPAIR_ON;
	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
	if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
		goto error;

	err = connect(s, (struct sockaddr *)&addr, len);
	if (!ASSERT_OK(err, "connect"))
		goto error;

	repair = TCP_REPAIR_OFF_NO_WP;
	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
	if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
		goto error;

	return s;
error:
	perror(__func__);
	close(s);
	return -1;
}

static void compare_cookies(struct bpf_map *src, struct bpf_map *dst)
{
	__u32 i, max_entries = bpf_map__max_entries(src);
	int err, src_fd, dst_fd;

	src_fd = bpf_map__fd(src);
	dst_fd = bpf_map__fd(dst);

	for (i = 0; i < max_entries; i++) {
		__u64 src_cookie, dst_cookie;

		err = bpf_map_lookup_elem(src_fd, &i, &src_cookie);
		if (err && errno == ENOENT) {
			err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
			ASSERT_ERR(err, "map_lookup_elem(dst)");
			ASSERT_EQ(errno, ENOENT, "map_lookup_elem(dst)");
			continue;
		}
		if (!ASSERT_OK(err, "lookup_elem(src)"))
			continue;

		err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
		if (!ASSERT_OK(err, "lookup_elem(dst)"))
			continue;

		ASSERT_EQ(dst_cookie, src_cookie, "cookie mismatch");
	}
}

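/*
 * Note: connected_socket_v4() above relies on TCP_REPAIR: with repair mode
 * enabled, connect() to 127.0.0.1:80 only records the 4-tuple and moves the
 * socket to ESTABLISHED without sending a SYN, so no listener is required.
 * That is enough for the socket to be accepted into a sockmap or sockhash.
 */
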
/* Create a map, populate it with one socket, and free the map. */
static void test_sockmap_create_update_free(enum bpf_map_type map_type)
{
	const int zero = 0;
	int s, map, err;

	s = connected_socket_v4();
	if (!ASSERT_GE(s, 0, "connected_socket_v4"))
		return;

	map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
	if (!ASSERT_GE(map, 0, "bpf_map_create"))
		goto out;

	err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update"))
		goto out;

out:
	close(map);
	close(s);
}

static void test_sockmap_vsock_delete_on_close(void)
{
	int map, c, p, err, zero = 0;

	map = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(int),
			     sizeof(int), 1, NULL);
	if (!ASSERT_OK_FD(map, "bpf_map_create"))
		return;

	err = create_pair(AF_VSOCK, SOCK_STREAM, &c, &p);
	if (!ASSERT_OK(err, "create_pair"))
		goto close_map;

	if (xbpf_map_update_elem(map, &zero, &c, BPF_NOEXIST))
		goto close_socks;

	xclose(c);
	xclose(p);

	err = create_pair(AF_VSOCK, SOCK_STREAM, &c, &p);
	if (!ASSERT_OK(err, "create_pair"))
		goto close_map;

	err = bpf_map_update_elem(map, &zero, &c, BPF_NOEXIST);
	ASSERT_OK(err, "after close(), bpf_map_update");

close_socks:
	xclose(c);
	xclose(p);
close_map:
	xclose(map);
}

static void test_skmsg_helpers(enum bpf_map_type map_type)
{
	struct test_skmsg_load_helpers *skel;
	int err, map, verdict;

	skel = test_skmsg_load_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load"))
		return;

	verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
	map = bpf_map__fd(skel->maps.sock_map);

	err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	err = bpf_prog_detach2(verdict, map, BPF_SK_MSG_VERDICT);
	if (!ASSERT_OK(err, "bpf_prog_detach2"))
		goto out;
out:
	test_skmsg_load_helpers__destroy(skel);
}

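/*
 * Link-based sk_msg attachment: once a bpf_link holds the verdict slot,
 * plain bpf_prog_attach() and a second link must both fail, and
 * bpf_link_update() with BPF_F_REPLACE must check old_prog_fd against the
 * program currently installed on the link.
 */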
static void test_skmsg_helpers_with_link(enum bpf_map_type map_type)
{
	struct bpf_program *prog, *prog_clone, *prog_clone2;
	DECLARE_LIBBPF_OPTS(bpf_link_update_opts, opts);
	struct test_skmsg_load_helpers *skel;
	struct bpf_link *link, *link2;
	int err, map;

	skel = test_skmsg_load_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load"))
		return;

	prog = skel->progs.prog_msg_verdict;
	prog_clone = skel->progs.prog_msg_verdict_clone;
	prog_clone2 = skel->progs.prog_msg_verdict_clone2;
	map = bpf_map__fd(skel->maps.sock_map);

	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
		goto out;

	/* Fail since bpf_link for the same prog has been created. */
	err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_MSG_VERDICT, 0);
	if (!ASSERT_ERR(err, "bpf_prog_attach"))
		goto out;

	/* Fail since bpf_link for the same prog type has been created. */
	link2 = bpf_program__attach_sockmap(prog_clone, map);
	if (!ASSERT_ERR_PTR(link2, "bpf_program__attach_sockmap")) {
		bpf_link__detach(link2);
		goto out;
	}

	err = bpf_link__update_program(link, prog_clone);
	if (!ASSERT_OK(err, "bpf_link__update_program"))
		goto out;

	/* Fail since a prog with different type attempts to do update. */
	err = bpf_link__update_program(link, skel->progs.prog_skb_verdict);
	if (!ASSERT_ERR(err, "bpf_link__update_program"))
		goto out;

	/* Fail since the old prog does not match the one in the kernel. */
	opts.old_prog_fd = bpf_program__fd(prog_clone2);
	opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), &opts);
	if (!ASSERT_ERR(err, "bpf_link_update"))
		goto out;

	opts.old_prog_fd = bpf_program__fd(prog_clone);
	opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), &opts);
	if (!ASSERT_OK(err, "bpf_link_update"))
		goto out;
out:
	bpf_link__detach(link);
	test_skmsg_load_helpers__destroy(skel);
}

static void test_sockmap_update(enum bpf_map_type map_type)
{
	int err, prog, src;
	struct test_sockmap_update *skel;
	struct bpf_map *dst_map;
	const __u32 zero = 0;
	char dummy[14] = {0};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = dummy,
		.data_size_in = sizeof(dummy),
		.repeat = 1,
	);
	__s64 sk;

	sk = connected_socket_v4();
	if (!ASSERT_NEQ(sk, -1, "connected_socket_v4"))
		return;

	skel = test_sockmap_update__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		goto close_sk;

	prog = bpf_program__fd(skel->progs.copy_sock_map);
	src = bpf_map__fd(skel->maps.src);
	if (map_type == BPF_MAP_TYPE_SOCKMAP)
		dst_map = skel->maps.dst_sock_map;
	else
		dst_map = skel->maps.dst_sock_hash;

	err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST);
	if (!ASSERT_OK(err, "update_elem(src)"))
		goto out;

	err = bpf_prog_test_run_opts(prog, &topts);
	if (!ASSERT_OK(err, "test_run"))
		goto out;
	if (!ASSERT_NEQ(topts.retval, 0, "test_run retval"))
		goto out;

	compare_cookies(skel->maps.src, dst_map);

out:
	test_sockmap_update__destroy(skel);
close_sk:
	close(sk);
}

static void test_sockmap_invalid_update(void)
{
	struct test_sockmap_invalid_update *skel;

	skel = test_sockmap_invalid_update__open_and_load();
	if (!ASSERT_NULL(skel, "open_and_load"))
		test_sockmap_invalid_update__destroy(skel);
}

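/*
 * Iterate a populated sockmap/sockhash with a BPF iterator program that
 * copies each socket into skel->maps.dst, then verify the copy by comparing
 * socket cookies.
 */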
static void test_sockmap_copy(enum bpf_map_type map_type)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, len, src_fd, iter_fd;
	union bpf_iter_link_info linfo = {};
	__u32 i, num_sockets, num_elems;
	struct bpf_iter_sockmap *skel;
	__s64 *sock_fd = NULL;
	struct bpf_link *link;
	struct bpf_map *src;
	char buf[64];

	skel = bpf_iter_sockmap__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
		return;

	if (map_type == BPF_MAP_TYPE_SOCKMAP) {
		src = skel->maps.sockmap;
		num_elems = bpf_map__max_entries(src);
		num_sockets = num_elems - 1;
	} else {
		src = skel->maps.sockhash;
		num_elems = bpf_map__max_entries(src) - 1;
		num_sockets = num_elems;
	}

	sock_fd = calloc(num_sockets, sizeof(*sock_fd));
	if (!ASSERT_OK_PTR(sock_fd, "calloc(sock_fd)"))
		goto out;

	for (i = 0; i < num_sockets; i++)
		sock_fd[i] = -1;

	src_fd = bpf_map__fd(src);

	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = connected_socket_v4();
		if (!ASSERT_NEQ(sock_fd[i], -1, "connected_socket_v4"))
			goto out;

		err = bpf_map_update_elem(src_fd, &i, &sock_fd[i], BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	linfo.map.map_fd = src_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.copy, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->elems, num_elems, "elems"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->socks, num_sockets, "socks"))
		goto close_iter;

	compare_cookies(src, skel->maps.dst);

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; sock_fd && i < num_sockets; i++)
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	if (sock_fd)
		free(sock_fd);
	bpf_iter_sockmap__destroy(skel);
}

static void test_sockmap_skb_verdict_attach(enum bpf_attach_type first,
					    enum bpf_attach_type second)
{
	struct test_sockmap_skb_verdict_attach *skel;
	int err, map, verdict;

	skel = test_sockmap_skb_verdict_attach__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
	map = bpf_map__fd(skel->maps.sock_map);

	err = bpf_prog_attach(verdict, map, first, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	err = bpf_prog_attach(verdict, map, second, 0);
	ASSERT_EQ(err, -EBUSY, "prog_attach_fail");

	err = bpf_prog_detach2(verdict, map, first);
	if (!ASSERT_OK(err, "bpf_prog_detach2"))
		goto out;
out:
	test_sockmap_skb_verdict_attach__destroy(skel);
}

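/*
 * Same attach exclusivity as above, but via bpf_link: after the link is
 * detached the slot is free for bpf_prog_attach(), and while that raw
 * attachment exists a new link for the same prog/map must fail.
 */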
static void test_sockmap_skb_verdict_attach_with_link(void)
{
	struct test_sockmap_skb_verdict_attach *skel;
	struct bpf_program *prog;
	struct bpf_link *link;
	int err, map;

	skel = test_sockmap_skb_verdict_attach__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;
	prog = skel->progs.prog_skb_verdict;
	map = bpf_map__fd(skel->maps.sock_map);
	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
		goto out;

	bpf_link__detach(link);

	err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	/* Fail since attaching with the same prog/map has been done. */
	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_ERR_PTR(link, "bpf_program__attach_sockmap"))
		bpf_link__detach(link);

	err = bpf_prog_detach2(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT);
	if (!ASSERT_OK(err, "bpf_prog_detach2"))
		goto out;
out:
	test_sockmap_skb_verdict_attach__destroy(skel);
}

static __u32 query_prog_id(int prog_fd)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	int err;

	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd") ||
	    !ASSERT_EQ(info_len, sizeof(info), "bpf_prog_get_info_by_fd"))
		return 0;

	return info.id;
}

static void test_sockmap_progs_query(enum bpf_attach_type attach_type)
{
	struct test_sockmap_progs_query *skel;
	int err, map_fd, verdict_fd;
	__u32 attach_flags = 0;
	__u32 prog_ids[3] = {};
	__u32 prog_cnt = 3;

	skel = test_sockmap_progs_query__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_sockmap_progs_query__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sock_map);

	if (attach_type == BPF_SK_MSG_VERDICT)
		verdict_fd = bpf_program__fd(skel->progs.prog_skmsg_verdict);
	else
		verdict_fd = bpf_program__fd(skel->progs.prog_skb_verdict);

	err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
			     &attach_flags, prog_ids, &prog_cnt);
	ASSERT_OK(err, "bpf_prog_query failed");
	ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
	ASSERT_EQ(prog_cnt, 0, "wrong program count on query");

	err = bpf_prog_attach(verdict_fd, map_fd, attach_type, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach failed"))
		goto out;

	prog_cnt = 1;
	err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
			     &attach_flags, prog_ids, &prog_cnt);
	ASSERT_OK(err, "bpf_prog_query failed");
	ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
	ASSERT_EQ(prog_cnt, 1, "wrong program count on query");
	ASSERT_EQ(prog_ids[0], query_prog_id(verdict_fd),
		  "wrong prog_ids on query");

	bpf_prog_detach2(verdict_fd, map_fd, attach_type);
out:
	test_sockmap_progs_query__destroy(skel);
}

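/*
 * A peer's FIN must still reach the mapped socket through the verdict
 * program: epoll reports EPOLLIN and recv() returns 0 (EOF).
 */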
#define MAX_EVENTS 10
static void test_sockmap_skb_verdict_shutdown(void)
{
	int n, err, map, verdict, c1 = -1, p1 = -1;
	struct epoll_event ev, events[MAX_EVENTS];
	struct test_sockmap_pass_prog *skel;
	int zero = 0;
	int epollfd;
	char b;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
	map = bpf_map__fd(skel->maps.sock_map_rx);

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	err = create_pair(AF_INET, SOCK_STREAM, &c1, &p1);
	if (err < 0)
		goto out;

	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
	if (err < 0)
		goto out_close;

	shutdown(p1, SHUT_WR);

	ev.events = EPOLLIN;
	ev.data.fd = c1;

	epollfd = epoll_create1(0);
	if (!ASSERT_GT(epollfd, -1, "epoll_create(0)"))
		goto out_close;
	err = epoll_ctl(epollfd, EPOLL_CTL_ADD, c1, &ev);
	if (!ASSERT_OK(err, "epoll_ctl(EPOLL_CTL_ADD)"))
		goto out_close;
	err = epoll_wait(epollfd, events, MAX_EVENTS, -1);
	if (!ASSERT_EQ(err, 1, "epoll_wait(fd)"))
		goto out_close;

	n = recv(c1, &b, 1, MSG_DONTWAIT);
	ASSERT_EQ(n, 0, "recv(fin)");
out_close:
	close(c1);
	close(p1);
out:
	test_sockmap_pass_prog__destroy(skel);
}

static void test_sockmap_skb_verdict_fionread(bool pass_prog)
{
	int err, map, verdict, c0 = -1, c1 = -1, p0 = -1, p1 = -1;
	int expected, zero = 0, sent, recvd, avail;
	struct test_sockmap_pass_prog *pass = NULL;
	struct test_sockmap_drop_prog *drop = NULL;
	char buf[256] = "0123456789";

	if (pass_prog) {
		pass = test_sockmap_pass_prog__open_and_load();
		if (!ASSERT_OK_PTR(pass, "open_and_load"))
			return;
		verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
		map = bpf_map__fd(pass->maps.sock_map_rx);
		expected = sizeof(buf);
	} else {
		drop = test_sockmap_drop_prog__open_and_load();
		if (!ASSERT_OK_PTR(drop, "open_and_load"))
			return;
		verdict = bpf_program__fd(drop->progs.prog_skb_verdict);
		map = bpf_map__fd(drop->maps.sock_map_rx);
		/* On drop data is consumed immediately and copied_seq inc'd */
		expected = 0;
	}

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	err = create_socket_pairs(AF_INET, SOCK_STREAM, &c0, &c1, &p0, &p1);
	if (!ASSERT_OK(err, "create_socket_pairs()"))
		goto out;

	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
		goto out_close;

	sent = xsend(p1, &buf, sizeof(buf), 0);
	ASSERT_EQ(sent, sizeof(buf), "xsend(p0)");
	err = ioctl(c1, FIONREAD, &avail);
	ASSERT_OK(err, "ioctl(FIONREAD) error");
	ASSERT_EQ(avail, expected, "ioctl(FIONREAD)");
	/* On DROP test there will be no data to read */
	if (pass_prog) {
		recvd = recv_timeout(c1, &buf, sizeof(buf), MSG_DONTWAIT, IO_TIMEOUT_SEC);
		ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c0)");
	}

out_close:
	close(c0);
	close(p0);
	close(c1);
	close(p1);
out:
	if (pass_prog)
		test_sockmap_pass_prog__destroy(pass);
	else
		test_sockmap_drop_prog__destroy(drop);
}

static void test_sockmap_skb_verdict_change_tail(void)
{
	struct test_sockmap_change_tail *skel;
	int err, map, verdict;
	int c1, p1, sent, recvd;
	int zero = 0;
	char buf[2];

	skel = test_sockmap_change_tail__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;
	verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
	map = bpf_map__fd(skel->maps.sock_map_rx);

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;
	err = create_pair(AF_INET, SOCK_STREAM, &c1, &p1);
	if (!ASSERT_OK(err, "create_pair()"))
		goto out;
	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
		goto out_close;
	sent = xsend(p1, "Tr", 2, 0);
	ASSERT_EQ(sent, 2, "xsend(p1)");
	recvd = recv(c1, buf, 2, 0);
	ASSERT_EQ(recvd, 1, "recv(c1)");
	ASSERT_EQ(skel->data->change_tail_ret, 0, "change_tail_ret");

	sent = xsend(p1, "G", 1, 0);
	ASSERT_EQ(sent, 1, "xsend(p1)");
	recvd = recv(c1, buf, 2, 0);
	ASSERT_EQ(recvd, 2, "recv(c1)");
	ASSERT_EQ(skel->data->change_tail_ret, 0, "change_tail_ret");

	sent = xsend(p1, "E", 1, 0);
	ASSERT_EQ(sent, 1, "xsend(p1)");
	recvd = recv(c1, buf, 1, 0);
	ASSERT_EQ(recvd, 1, "recv(c1)");
	ASSERT_EQ(skel->data->change_tail_ret, -EINVAL, "change_tail_ret");

out_close:
	close(c1);
	close(p1);
out:
	test_sockmap_change_tail__destroy(skel);
}

static void test_sockmap_skb_verdict_peek_helper(int map)
{
	int err, c1, p1, zero = 0, sent, recvd, avail;
	char snd[256] = "0123456789";
	char rcv[256] = "0";

	err = create_pair(AF_INET, SOCK_STREAM, &c1, &p1);
	if (!ASSERT_OK(err, "create_pair()"))
		return;

	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
		goto out_close;

	sent = xsend(p1, snd, sizeof(snd), 0);
	ASSERT_EQ(sent, sizeof(snd), "xsend(p1)");
	recvd = recv(c1, rcv, sizeof(rcv), MSG_PEEK);
	ASSERT_EQ(recvd, sizeof(rcv), "recv(c1)");
	err = ioctl(c1, FIONREAD, &avail);
	ASSERT_OK(err, "ioctl(FIONREAD) error");
	ASSERT_EQ(avail, sizeof(snd), "after peek ioctl(FIONREAD)");
	recvd = recv(c1, rcv, sizeof(rcv), 0);
	ASSERT_EQ(recvd, sizeof(rcv), "recv(p0)");
	err = ioctl(c1, FIONREAD, &avail);
	ASSERT_OK(err, "ioctl(FIONREAD) error");
	ASSERT_EQ(avail, 0, "after read ioctl(FIONREAD)");

out_close:
	close(c1);
	close(p1);
}

static void test_sockmap_skb_verdict_peek(void)
{
	struct test_sockmap_pass_prog *pass;
	int err, map, verdict;

	pass = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(pass, "open_and_load"))
		return;
	verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
	map = bpf_map__fd(pass->maps.sock_map_rx);

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	test_sockmap_skb_verdict_peek_helper(map);

out:
	test_sockmap_pass_prog__destroy(pass);
}

static void test_sockmap_skb_verdict_peek_with_link(void)
{
	struct test_sockmap_pass_prog *pass;
	struct bpf_program *prog;
	struct bpf_link *link;
	int err, map;

	pass = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(pass, "open_and_load"))
		return;
	prog = pass->progs.prog_skb_verdict;
	map = bpf_map__fd(pass->maps.sock_map_rx);
	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
		goto out;

	err = bpf_link__update_program(link, pass->progs.prog_skb_verdict_clone);
	if (!ASSERT_OK(err, "bpf_link__update_program"))
		goto out;

	/* Fail since a prog with different attach type attempts to do update. */
	err = bpf_link__update_program(link, pass->progs.prog_skb_parser);
	if (!ASSERT_ERR(err, "bpf_link__update_program"))
		goto out;

	test_sockmap_skb_verdict_peek_helper(map);
	ASSERT_EQ(pass->bss->clone_called, 1, "clone_called");
out:
	bpf_link__detach(link);
	test_sockmap_pass_prog__destroy(pass);
}

static void test_sockmap_unconnected_unix(void)
{
	int err, map, stream = 0, dgram = 0, zero = 0;
	struct test_sockmap_pass_prog *skel;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map = bpf_map__fd(skel->maps.sock_map_rx);

	stream = xsocket(AF_UNIX, SOCK_STREAM, 0);
	if (stream < 0)
		return;

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		close(stream);
		return;
	}

	err = bpf_map_update_elem(map, &zero, &stream, BPF_ANY);
	ASSERT_ERR(err, "bpf_map_update_elem(stream)");

	err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
	ASSERT_OK(err, "bpf_map_update_elem(dgram)");

	close(stream);
	close(dgram);
}

static void test_sockmap_many_socket(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map, entry = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map = bpf_map__fd(skel->maps.sock_map_rx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err)
		goto out;

	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}
	for (entry--; entry >= 0; entry--) {
		err = bpf_map_delete_elem(map, &entry);
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
	}

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(tcp);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}

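/*
 * Same socket mix as above, but spread across the rx and tx maps; the
 * paired entries are then deleted from both maps.
 */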
static void test_sockmap_many_maps(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map[2], entry = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map[0] = bpf_map__fd(skel->maps.sock_map_rx);
	map[1] = bpf_map__fd(skel->maps.sock_map_tx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err)
		goto out;

	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}
	for (entry--; entry >= 0; entry--) {
		err = bpf_map_delete_elem(map[1], &entry);
		entry--;
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
		err = bpf_map_delete_elem(map[0], &entry);
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
	}

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(tcp);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}

static void test_sockmap_same_sock(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map, zero = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map = bpf_map__fd(skel->maps.sock_map_rx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err) {
		close(tcp);
		goto out;
	}

	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}

	close(tcp);
	err = bpf_map_delete_elem(map, &zero);
	ASSERT_ERR(err, "bpf_map_delete_elem(entry)");

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}

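/*
 * vsock receive path through a sockmap verdict program: one byte sent by
 * the peer must wake up poll() on the mapped socket and be readable with a
 * non-blocking recv().
 */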
static void test_sockmap_skb_verdict_vsock_poll(void)
{
	struct test_sockmap_pass_prog *skel;
	int err, map, conn, peer;
	struct bpf_program *prog;
	struct bpf_link *link;
	char buf = 'x';
	int zero = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	if (create_pair(AF_VSOCK, SOCK_STREAM, &conn, &peer))
		goto destroy;

	prog = skel->progs.prog_skb_verdict;
	map = bpf_map__fd(skel->maps.sock_map_rx);
	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
		goto close;

	err = bpf_map_update_elem(map, &zero, &conn, BPF_ANY);
	if (!ASSERT_OK(err, "bpf_map_update_elem"))
		goto detach;

	if (xsend(peer, &buf, 1, 0) != 1)
		goto detach;

	err = poll_read(conn, IO_TIMEOUT_SEC);
	if (!ASSERT_OK(err, "poll"))
		goto detach;

	if (xrecv_nonblock(conn, &buf, 1, 0) != 1)
		FAIL("xrecv_nonblock");
detach:
	bpf_link__detach(link);
close:
	xclose(conn);
	xclose(peer);
destroy:
	test_sockmap_pass_prog__destroy(skel);
}

static void test_sockmap_vsock_unconnected(void)
{
	struct sockaddr_storage addr;
	int map, s, zero = 0;
	socklen_t alen;

	map = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(int),
			     sizeof(int), 1, NULL);
	if (!ASSERT_OK_FD(map, "bpf_map_create"))
		return;

	s = xsocket(AF_VSOCK, SOCK_STREAM, 0);
	if (s < 0)
		goto close_map;

	/* Fail connect(), but trigger transport assignment. */
	init_addr_loopback(AF_VSOCK, &addr, &alen);
	if (!ASSERT_ERR(connect(s, sockaddr(&addr), alen), "connect"))
		goto close_sock;

	ASSERT_ERR(bpf_map_update_elem(map, &zero, &s, BPF_ANY), "map_update");

close_sock:
	xclose(s);
close_map:
	xclose(map);
}

void test_sockmap_basic(void)
{
	if (test__start_subtest("sockmap create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap vsock delete on close"))
		test_sockmap_vsock_delete_on_close();
	if (test__start_subtest("sockmap sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update in unsafe context"))
		test_sockmap_invalid_update();
	if (test__start_subtest("sockmap copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap skb_verdict attach")) {
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_VERDICT,
						BPF_SK_SKB_STREAM_VERDICT);
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_STREAM_VERDICT,
						BPF_SK_SKB_VERDICT);
	}
	if (test__start_subtest("sockmap skb_verdict attach_with_link"))
		test_sockmap_skb_verdict_attach_with_link();
	if (test__start_subtest("sockmap msg_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_MSG_VERDICT);
	if (test__start_subtest("sockmap stream_parser progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_STREAM_PARSER);
	if (test__start_subtest("sockmap stream_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_STREAM_VERDICT);
	if (test__start_subtest("sockmap skb_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_VERDICT);
	if (test__start_subtest("sockmap skb_verdict shutdown"))
		test_sockmap_skb_verdict_shutdown();
	if (test__start_subtest("sockmap skb_verdict fionread"))
		test_sockmap_skb_verdict_fionread(true);
	if (test__start_subtest("sockmap skb_verdict fionread on drop"))
		test_sockmap_skb_verdict_fionread(false);
	if (test__start_subtest("sockmap skb_verdict change tail"))
		test_sockmap_skb_verdict_change_tail();
	if (test__start_subtest("sockmap skb_verdict msg_f_peek"))
		test_sockmap_skb_verdict_peek();
	if (test__start_subtest("sockmap skb_verdict msg_f_peek with link"))
		test_sockmap_skb_verdict_peek_with_link();
	if (test__start_subtest("sockmap unconnected af_unix"))
		test_sockmap_unconnected_unix();
	if (test__start_subtest("sockmap one socket to many map entries"))
		test_sockmap_many_socket();
	if (test__start_subtest("sockmap one socket to many maps"))
		test_sockmap_many_maps();
	if (test__start_subtest("sockmap same socket replace"))
		test_sockmap_same_sock();
	if (test__start_subtest("sockmap sk_msg attach sockmap helpers with link"))
		test_skmsg_helpers_with_link(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash sk_msg attach sockhash helpers with link"))
		test_skmsg_helpers_with_link(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap skb_verdict vsock poll"))
		test_sockmap_skb_verdict_vsock_poll();
	if (test__start_subtest("sockmap vsock unconnected"))
		test_sockmap_vsock_unconnected();
}