// SPDX-License-Identifier: GPL-2.0-only
/*
 * Longest prefix match list implementation
 *
 * Copyright (c) 2016,2017 Daniel Mack
 * Copyright (c) 2016 David Herrmann
 */

#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <net/ipv6.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>
#include <asm/rqspinlock.h>
#include <linux/bpf_mem_alloc.h>

/* Intermediate node */
#define LPM_TREE_NODE_FLAG_IM BIT(0)

struct lpm_trie_node;

struct lpm_trie_node {
	struct lpm_trie_node __rcu	*child[2];
	u32				prefixlen;
	u32				flags;
	u8				data[];
};

struct lpm_trie {
	struct bpf_map			map;
	struct lpm_trie_node __rcu	*root;
	struct bpf_mem_alloc		ma;
	size_t				n_entries;
	size_t				max_prefixlen;
	size_t				data_size;
	rqspinlock_t			lock;
};

/* This trie implements a longest prefix match algorithm that can be used to
 * match IP addresses to a stored set of ranges.
 *
 * Data stored in @data of struct bpf_lpm_trie_key_u8 and struct lpm_trie_node
 * is interpreted as big endian, so data[0] stores the most significant byte.
 *
 * Match ranges are internally stored in instances of struct lpm_trie_node
 * which each contain their prefix length as well as two pointers that may
 * lead to more nodes containing more specific matches. Each node also stores
 * a value that is defined by and returned to userspace via the update_elem
 * and lookup functions.
 *
 * For instance, let's start with a trie that was created with a prefix length
 * of 32, so it can be used for IPv4 addresses, and one single element that
 * matches 192.168.0.0/16. The data array would hence contain
 * [0xc0, 0xa8, 0x00, 0x00] in big-endian notation. This documentation will
 * stick to IP-address notation for readability though.
 *
 * As the trie is empty initially, the new node (1) will be placed as the root
 * node, denoted as (R) in the example below. As there are no other nodes,
 * both child pointers are %NULL.
 *
 *              +----------------+
 *              |       (1)  (R) |
 *              | 192.168.0.0/16 |
 *              |    value: 1    |
 *              |   [0]    [1]   |
 *              +----------------+
 *
 * Next, let's add a new node (2) matching 192.168.0.0/24. As there is already
 * a node with the same data and a smaller prefix (ie, a less specific one),
 * node (2) will become a child of (1). Its child index depends on the next bit
 * that is outside of what (1) matches, and that bit is 0, so (2) will be
 * child[0] of (1):
 *
 *              +----------------+
 *              |       (1)  (R) |
 *              | 192.168.0.0/16 |
 *              |    value: 1    |
 *              |   [0]    [1]   |
 *              +----------------+
 *                   |
 *    +----------------+
 *    |       (2)      |
 *    | 192.168.0.0/24 |
 *    |    value: 2    |
 *    |   [0]    [1]   |
 *    +----------------+
 *
 * The child[1] slot of (1) could be filled with another node which has bit #17
 * (the next bit after the ones that (1) matches on) set to 1. For instance,
 * 192.168.128.0/24:
 *
 *              +----------------+
 *              |       (1)  (R) |
 *              | 192.168.0.0/16 |
 *              |    value: 1    |
 *              |   [0]    [1]   |
 *              +----------------+
 *                   |      |
 *    +----------------+  +------------------+
 *    |       (2)      |  |        (3)       |
 *    | 192.168.0.0/24 |  | 192.168.128.0/24 |
 *    |    value: 2    |  |     value: 3     |
 *    |   [0]    [1]   |  |    [0]    [1]    |
 *    +----------------+  +------------------+
 *
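 * (A note on the bit numbering used above: "bit #17" is the 17th most
 * significant bit of the key, i.e. index 16 as extract_bit() below counts
 * bits. For 192.168.128.0 = [0xc0, 0xa8, 0x80, 0x00] that is the most
 * significant bit of data[2], which is 1, hence (3) lives in child[1].)
 *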
 * Let's add another node (5) to the game for 192.168.1.0/24. In order to place
 * it, node (1) is looked at first, and because of the semantics laid out
 * above (bit #17 of (5) is 0), it would normally be attached to (1) as
 * child[0]. However, that slot is already allocated, so a new node is needed
 * in between. That node does not have a value attached to it and it will
 * never be returned to users as result of a lookup. It is only there to
 * differentiate the traversal further. It will get a prefix as wide as
 * necessary to distinguish its two children:
 *
 *                      +----------------+
 *                      |       (1)  (R) |
 *                      | 192.168.0.0/16 |
 *                      |    value: 1    |
 *                      |   [0]    [1]   |
 *                      +----------------+
 *                           |      |
 *            +----------------+  +------------------+
 *            |       (4)  (I) |  |        (3)       |
 *            | 192.168.0.0/23 |  | 192.168.128.0/24 |
 *            |    value: ---  |  |     value: 3     |
 *            |   [0]    [1]   |  |    [0]    [1]    |
 *            +----------------+  +------------------+
 *                 |      |
 *   +----------------+  +----------------+
 *   |       (2)      |  |       (5)      |
 *   | 192.168.0.0/24 |  | 192.168.1.0/24 |
 *   |    value: 2    |  |    value: 5    |
 *   |   [0]    [1]   |  |   [0]    [1]   |
 *   +----------------+  +----------------+
 *
 * 192.168.1.1/32 would be a child of (5) etc.
 *
 * An intermediate node will be turned into a 'real' node on demand. In the
 * example above, (4) would be re-used if 192.168.0.0/23 is added to the trie.
 *
 * A fully populated trie would have a height of 32 nodes, as the trie was
 * created with a prefix length of 32.
 *
 * The lookup starts at the root node. If the current node matches and if there
 * is a child that can be used to become more specific, the trie is traversed
 * downwards. The last node in the traversal that is a non-intermediate one is
 * returned.
 */

static inline int extract_bit(const u8 *data, size_t index)
{
	return !!(data[index / 8] & (1 << (7 - (index % 8))));
}

/**
 * __longest_prefix_match() - determine the longest prefix
 * @trie:	The trie to get internal sizes from
 * @node:	The node to operate on
 * @key:	The key to compare to @node
 *
 * Determine the longest prefix of @node that matches the bits in @key.
 */
static __always_inline
size_t __longest_prefix_match(const struct lpm_trie *trie,
			      const struct lpm_trie_node *node,
			      const struct bpf_lpm_trie_key_u8 *key)
{
	u32 limit = min(node->prefixlen, key->prefixlen);
	u32 prefixlen = 0, i = 0;

	BUILD_BUG_ON(offsetof(struct lpm_trie_node, data) % sizeof(u32));
	BUILD_BUG_ON(offsetof(struct bpf_lpm_trie_key_u8, data) % sizeof(u32));

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(CONFIG_64BIT)

	/* data_size >= 16 has very small probability.
	 * We do not use a loop for optimal code generation.
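	 *
	 * A worked example (data_size == 4, so this 64-bit step is skipped
	 * and only the 32-bit step below runs): node->data = 192.168.0.0 =
	 * c0 a8 00 00 and key->data = 192.168.128.7 = c0 a8 80 07 XOR to
	 * 0x00008007; fls() returns 16, so the matched prefix is
	 * 32 - 16 = 16 bits (clamped to @limit).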
	 */
	if (trie->data_size >= 8) {
		u64 diff = be64_to_cpu(*(__be64 *)node->data ^
				       *(__be64 *)key->data);

		prefixlen = 64 - fls64(diff);
		if (prefixlen >= limit)
			return limit;
		if (diff)
			return prefixlen;
		i = 8;
	}
#endif

	while (trie->data_size >= i + 4) {
		u32 diff = be32_to_cpu(*(__be32 *)&node->data[i] ^
				       *(__be32 *)&key->data[i]);

		prefixlen += 32 - fls(diff);
		if (prefixlen >= limit)
			return limit;
		if (diff)
			return prefixlen;
		i += 4;
	}

	if (trie->data_size >= i + 2) {
		u16 diff = be16_to_cpu(*(__be16 *)&node->data[i] ^
				       *(__be16 *)&key->data[i]);

		prefixlen += 16 - fls(diff);
		if (prefixlen >= limit)
			return limit;
		if (diff)
			return prefixlen;
		i += 2;
	}

	if (trie->data_size >= i + 1) {
		prefixlen += 8 - fls(node->data[i] ^ key->data[i]);

		if (prefixlen >= limit)
			return limit;
	}

	return prefixlen;
}

static size_t longest_prefix_match(const struct lpm_trie *trie,
				   const struct lpm_trie_node *node,
				   const struct bpf_lpm_trie_key_u8 *key)
{
	return __longest_prefix_match(trie, node, key);
}

/* Called from syscall or from eBPF program */
static void *trie_lookup_elem(struct bpf_map *map, void *_key)
{
	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
	struct lpm_trie_node *node, *found = NULL;
	struct bpf_lpm_trie_key_u8 *key = _key;

	if (key->prefixlen > trie->max_prefixlen)
		return NULL;

	/* Start walking the trie from the root node ... */

	for (node = rcu_dereference_check(trie->root, rcu_read_lock_bh_held());
	     node;) {
		unsigned int next_bit;
		size_t matchlen;

		/* Determine the longest prefix of @node that matches @key.
		 * If it's the maximum possible prefix for this trie, we have
		 * an exact match and can return it directly.
		 */
		matchlen = __longest_prefix_match(trie, node, key);
		if (matchlen == trie->max_prefixlen) {
			found = node;
			break;
		}

		/* If the number of bits that match is smaller than the prefix
		 * length of @node, bail out and return the node we have seen
		 * last in the traversal (ie, the parent).
		 */
		if (matchlen < node->prefixlen)
			break;

		/* Consider this node as return candidate unless it is an
		 * artificially added intermediate one.
		 */
		if (!(node->flags & LPM_TREE_NODE_FLAG_IM))
			found = node;

		/* If the node match is fully satisfied, let's see if we can
		 * become more specific. Determine the next bit in the key and
		 * traverse down.
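		 *
		 * E.g. with node->prefixlen == 16 and a key holding
		 * 192.168.128.7, extract_bit() inspects bit index 16, the
		 * most significant bit of key->data[2] (0x80), which is 1,
		 * so the walk continues in child[1].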
		 */
		next_bit = extract_bit(key->data, node->prefixlen);
		node = rcu_dereference_check(node->child[next_bit],
					     rcu_read_lock_bh_held());
	}

	if (!found)
		return NULL;

	return found->data + trie->data_size;
}

static struct lpm_trie_node *lpm_trie_node_alloc(struct lpm_trie *trie,
						 const void *value)
{
	struct lpm_trie_node *node;

	node = bpf_mem_cache_alloc(&trie->ma);

	if (!node)
		return NULL;

	node->flags = 0;

	if (value)
		memcpy(node->data + trie->data_size, value,
		       trie->map.value_size);

	return node;
}

static int trie_check_add_elem(struct lpm_trie *trie, u64 flags)
{
	if (flags == BPF_EXIST)
		return -ENOENT;
	if (trie->n_entries == trie->map.max_entries)
		return -ENOSPC;
	trie->n_entries++;
	return 0;
}

/* Called from syscall or from eBPF program */
static long trie_update_elem(struct bpf_map *map,
			     void *_key, void *value, u64 flags)
{
	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
	struct lpm_trie_node *node, *im_node, *new_node;
	struct lpm_trie_node *free_node = NULL;
	struct lpm_trie_node __rcu **slot;
	struct bpf_lpm_trie_key_u8 *key = _key;
	unsigned long irq_flags;
	unsigned int next_bit;
	size_t matchlen = 0;
	int ret = 0;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	if (key->prefixlen > trie->max_prefixlen)
		return -EINVAL;

	/* Allocate and fill a new node */
	new_node = lpm_trie_node_alloc(trie, value);
	if (!new_node)
		return -ENOMEM;

	ret = raw_res_spin_lock_irqsave(&trie->lock, irq_flags);
	if (ret)
		goto out_free;

	new_node->prefixlen = key->prefixlen;
	RCU_INIT_POINTER(new_node->child[0], NULL);
	RCU_INIT_POINTER(new_node->child[1], NULL);
	memcpy(new_node->data, key->data, trie->data_size);

	/* Now find a slot to attach the new node. To do that, walk the tree
	 * from the root and match as many bits as possible for each node until
	 * we either find an empty slot or a slot that needs to be replaced by
	 * an intermediate node.
	 */
	slot = &trie->root;

	while ((node = rcu_dereference(*slot))) {
		matchlen = longest_prefix_match(trie, node, key);

		if (node->prefixlen != matchlen ||
		    node->prefixlen == key->prefixlen)
			break;

		next_bit = extract_bit(key->data, node->prefixlen);
		slot = &node->child[next_bit];
	}

	/* If the slot is empty (a free child pointer or an empty root),
	 * simply assign the @new_node to that slot and be done.
	 */
	if (!node) {
		ret = trie_check_add_elem(trie, flags);
		if (ret)
			goto out;

		rcu_assign_pointer(*slot, new_node);
		goto out;
	}

	/* If the slot we picked already exists, replace it with @new_node
	 * which already has the correct data array set.
	 */
	if (node->prefixlen == matchlen) {
		if (!(node->flags & LPM_TREE_NODE_FLAG_IM)) {
			if (flags == BPF_NOEXIST) {
				ret = -EEXIST;
				goto out;
			}
		} else {
			ret = trie_check_add_elem(trie, flags);
			if (ret)
				goto out;
		}

		new_node->child[0] = node->child[0];
		new_node->child[1] = node->child[1];

		rcu_assign_pointer(*slot, new_node);
		free_node = node;

		goto out;
	}

	ret = trie_check_add_elem(trie, flags);
	if (ret)
		goto out;

	/* If the new node matches the prefix completely, it must be inserted
	 * as an ancestor. Simply insert it between @node and *@slot.
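	 *
	 * E.g. adding 192.168.0.0/23 when 192.168.0.0/24 already occupies
	 * the slot: matchlen == key->prefixlen == 23 and bit 23 of the
	 * existing node's data (the low bit of data[2] == 0x00) is 0, so
	 * that node becomes child[0] of the new /23 node.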
	 */
	if (matchlen == key->prefixlen) {
		next_bit = extract_bit(node->data, matchlen);
		rcu_assign_pointer(new_node->child[next_bit], node);
		rcu_assign_pointer(*slot, new_node);
		goto out;
	}

	im_node = lpm_trie_node_alloc(trie, NULL);
	if (!im_node) {
		trie->n_entries--;
		ret = -ENOMEM;
		goto out;
	}

	im_node->prefixlen = matchlen;
	im_node->flags |= LPM_TREE_NODE_FLAG_IM;
	memcpy(im_node->data, node->data, trie->data_size);

	/* Now determine which child to install in which slot */
	if (extract_bit(key->data, matchlen)) {
		rcu_assign_pointer(im_node->child[0], node);
		rcu_assign_pointer(im_node->child[1], new_node);
	} else {
		rcu_assign_pointer(im_node->child[0], new_node);
		rcu_assign_pointer(im_node->child[1], node);
	}

	/* Finally, assign the intermediate node to the determined slot */
	rcu_assign_pointer(*slot, im_node);

out:
	raw_res_spin_unlock_irqrestore(&trie->lock, irq_flags);
out_free:
	if (ret)
		bpf_mem_cache_free(&trie->ma, new_node);
	bpf_mem_cache_free_rcu(&trie->ma, free_node);

	return ret;
}

/* Called from syscall or from eBPF program */
static long trie_delete_elem(struct bpf_map *map, void *_key)
{
	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
	struct lpm_trie_node *free_node = NULL, *free_parent = NULL;
	struct bpf_lpm_trie_key_u8 *key = _key;
	struct lpm_trie_node __rcu **trim, **trim2;
	struct lpm_trie_node *node, *parent;
	unsigned long irq_flags;
	unsigned int next_bit;
	size_t matchlen = 0;
	int ret = 0;

	if (key->prefixlen > trie->max_prefixlen)
		return -EINVAL;

	ret = raw_res_spin_lock_irqsave(&trie->lock, irq_flags);
	if (ret)
		return ret;

	/* Walk the tree looking for an exact key/length match and keeping
	 * track of the path we traverse. We will need to know the node
	 * we wish to delete, and the slot that points to the node we want
	 * to delete. We may also need to know the node's parent and the
	 * slot that contains it.
	 */
	trim = &trie->root;
	trim2 = trim;
	parent = NULL;
	while ((node = rcu_dereference(*trim))) {
		matchlen = longest_prefix_match(trie, node, key);

		if (node->prefixlen != matchlen ||
		    node->prefixlen == key->prefixlen)
			break;

		parent = node;
		trim2 = trim;
		next_bit = extract_bit(key->data, node->prefixlen);
		trim = &node->child[next_bit];
	}

	if (!node || node->prefixlen != key->prefixlen ||
	    node->prefixlen != matchlen ||
	    (node->flags & LPM_TREE_NODE_FLAG_IM)) {
		ret = -ENOENT;
		goto out;
	}

	trie->n_entries--;

	/* If the node we are removing has two children, simply mark it
	 * as intermediate and we are done.
	 */
	if (rcu_access_pointer(node->child[0]) &&
	    rcu_access_pointer(node->child[1])) {
		node->flags |= LPM_TREE_NODE_FLAG_IM;
		goto out;
	}

	/* If the parent of the node we are about to delete is an intermediate
	 * node, and the deleted node doesn't have any children, we can delete
	 * the intermediate parent as well and promote its other child
	 * up the tree. Doing this maintains the invariant that all
	 * intermediate nodes have exactly 2 children and that there are no
	 * unnecessary intermediate nodes in the tree.
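	 *
	 * In the example trie at the top of this file, deleting (2)
	 * 192.168.0.0/24 removes the intermediate (4) as well: its other
	 * child, (5) 192.168.1.0/24, is promoted into (1)'s child[0] slot.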
	 */
	if (parent && (parent->flags & LPM_TREE_NODE_FLAG_IM) &&
	    !node->child[0] && !node->child[1]) {
		if (node == rcu_access_pointer(parent->child[0]))
			rcu_assign_pointer(
				*trim2, rcu_access_pointer(parent->child[1]));
		else
			rcu_assign_pointer(
				*trim2, rcu_access_pointer(parent->child[0]));
		free_parent = parent;
		free_node = node;
		goto out;
	}

	/* The node we are removing has either zero or one child. If there
	 * is a child, move it into the removed node's slot then delete
	 * the node. Otherwise just clear the slot and delete the node.
	 */
	if (node->child[0])
		rcu_assign_pointer(*trim, rcu_access_pointer(node->child[0]));
	else if (node->child[1])
		rcu_assign_pointer(*trim, rcu_access_pointer(node->child[1]));
	else
		RCU_INIT_POINTER(*trim, NULL);
	free_node = node;

out:
	raw_res_spin_unlock_irqrestore(&trie->lock, irq_flags);

	bpf_mem_cache_free_rcu(&trie->ma, free_parent);
	bpf_mem_cache_free_rcu(&trie->ma, free_node);

	return ret;
}

#define LPM_DATA_SIZE_MAX	256
#define LPM_DATA_SIZE_MIN	1

#define LPM_VAL_SIZE_MAX	(KMALLOC_MAX_SIZE - LPM_DATA_SIZE_MAX - \
				 sizeof(struct lpm_trie_node))
#define LPM_VAL_SIZE_MIN	1

#define LPM_KEY_SIZE(X)		(sizeof(struct bpf_lpm_trie_key_u8) + (X))
#define LPM_KEY_SIZE_MAX	LPM_KEY_SIZE(LPM_DATA_SIZE_MAX)
#define LPM_KEY_SIZE_MIN	LPM_KEY_SIZE(LPM_DATA_SIZE_MIN)

#define LPM_CREATE_FLAG_MASK	(BPF_F_NO_PREALLOC | BPF_F_NUMA_NODE |	\
				 BPF_F_ACCESS_MASK)

static struct bpf_map *trie_alloc(union bpf_attr *attr)
{
	struct lpm_trie *trie;
	size_t leaf_size;
	int err;

	/* check sanity of attributes */
	if (attr->max_entries == 0 ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->map_flags & ~LPM_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    attr->key_size < LPM_KEY_SIZE_MIN ||
	    attr->key_size > LPM_KEY_SIZE_MAX ||
	    attr->value_size < LPM_VAL_SIZE_MIN ||
	    attr->value_size > LPM_VAL_SIZE_MAX)
		return ERR_PTR(-EINVAL);

	trie = bpf_map_area_alloc(sizeof(*trie), NUMA_NO_NODE);
	if (!trie)
		return ERR_PTR(-ENOMEM);

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&trie->map, attr);
	trie->data_size = attr->key_size -
			  offsetof(struct bpf_lpm_trie_key_u8, data);
	trie->max_prefixlen = trie->data_size * 8;

	raw_res_spin_lock_init(&trie->lock);

	/* Allocate intermediate and leaf nodes from the same allocator */
	leaf_size = sizeof(struct lpm_trie_node) + trie->data_size +
		    trie->map.value_size;
	err = bpf_mem_alloc_init(&trie->ma, leaf_size, false);
	if (err)
		goto free_out;
	return &trie->map;

free_out:
	bpf_map_area_free(trie);
	return ERR_PTR(err);
}

static void trie_free(struct bpf_map *map)
{
	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
	struct lpm_trie_node __rcu **slot;
	struct lpm_trie_node *node;

	/* Always start at the root and walk down to a node that has no
	 * children. Then free that node, nullify its reference in the parent
	 * and start over.
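	 *
	 * Each pass of the outer loop below thus frees exactly one node;
	 * no recursion or extra bookkeeping is needed for the teardown.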
	 */

	for (;;) {
		slot = &trie->root;

		for (;;) {
			node = rcu_dereference_protected(*slot, 1);
			if (!node)
				goto out;

			if (rcu_access_pointer(node->child[0])) {
				slot = &node->child[0];
				continue;
			}

			if (rcu_access_pointer(node->child[1])) {
				slot = &node->child[1];
				continue;
			}

			/* No bpf program may access the map, so free the
			 * node without waiting for the extra RCU GP.
			 */
			bpf_mem_cache_raw_free(node);
			RCU_INIT_POINTER(*slot, NULL);
			break;
		}
	}

out:
	bpf_mem_alloc_destroy(&trie->ma);
	bpf_map_area_free(trie);
}

static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
{
	struct lpm_trie_node *node, *next_node = NULL, *parent, *search_root;
	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
	struct bpf_lpm_trie_key_u8 *key = _key, *next_key = _next_key;
	struct lpm_trie_node **node_stack = NULL;
	int err = 0, stack_ptr = -1;
	unsigned int next_bit;
	size_t matchlen = 0;

	/* The get_next_key follows postorder. For the 4-node example at
	 * the top of this file, trie_get_next_key() returns the following
	 * keys one after another:
	 * 192.168.0.0/24
	 * 192.168.1.0/24
	 * 192.168.128.0/24
	 * 192.168.0.0/16
	 *
	 * The idea is to return more specific keys before less specific ones.
	 */

	/* Empty trie */
	search_root = rcu_dereference(trie->root);
	if (!search_root)
		return -ENOENT;

	/* For invalid key, find the leftmost node in the trie */
	if (!key || key->prefixlen > trie->max_prefixlen)
		goto find_leftmost;

	node_stack = kmalloc_array(trie->max_prefixlen + 1,
				   sizeof(struct lpm_trie_node *),
				   GFP_ATOMIC | __GFP_NOWARN);
	if (!node_stack)
		return -ENOMEM;

	/* Try to find the exact node for the given key */
	for (node = search_root; node;) {
		node_stack[++stack_ptr] = node;
		matchlen = longest_prefix_match(trie, node, key);
		if (node->prefixlen != matchlen ||
		    node->prefixlen == key->prefixlen)
			break;

		next_bit = extract_bit(key->data, node->prefixlen);
		node = rcu_dereference(node->child[next_bit]);
	}
	if (!node || node->prefixlen != matchlen ||
	    (node->flags & LPM_TREE_NODE_FLAG_IM))
		goto find_leftmost;

	/* The node with the exactly-matching key has been found,
	 * find the first node in postorder after the matched node.
	 */
	node = node_stack[stack_ptr];
	while (stack_ptr > 0) {
		parent = node_stack[stack_ptr - 1];
		if (rcu_dereference(parent->child[0]) == node) {
			search_root = rcu_dereference(parent->child[1]);
			if (search_root)
				goto find_leftmost;
		}
		if (!(parent->flags & LPM_TREE_NODE_FLAG_IM)) {
			next_node = parent;
			goto do_copy;
		}

		node = parent;
		stack_ptr--;
	}

	/* did not find anything */
	err = -ENOENT;
	goto free_stack;

find_leftmost:
	/* Find the leftmost non-intermediate node; all intermediate nodes
	 * have exactly two children, so this function will never return NULL.
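	 *
	 * For the example trie at the top of this file, this descent from
	 * the root ends at (2) 192.168.0.0/24, the first key in postorder.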
	 */
	for (node = search_root; node;) {
		if (node->flags & LPM_TREE_NODE_FLAG_IM) {
			node = rcu_dereference(node->child[0]);
		} else {
			next_node = node;
			node = rcu_dereference(node->child[0]);
			if (!node)
				node = rcu_dereference(next_node->child[1]);
		}
	}
do_copy:
	next_key->prefixlen = next_node->prefixlen;
	memcpy((void *)next_key + offsetof(struct bpf_lpm_trie_key_u8, data),
	       next_node->data, trie->data_size);
free_stack:
	kfree(node_stack);
	return err;
}

static int trie_check_btf(const struct bpf_map *map,
			  const struct btf *btf,
			  const struct btf_type *key_type,
			  const struct btf_type *value_type)
{
	/* Keys must have struct bpf_lpm_trie_key_u8 embedded. */
	return BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ?
	       -EINVAL : 0;
}

static u64 trie_mem_usage(const struct bpf_map *map)
{
	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
	u64 elem_size;

	elem_size = sizeof(struct lpm_trie_node) + trie->data_size +
		    trie->map.value_size;
	return elem_size * READ_ONCE(trie->n_entries);
}

BTF_ID_LIST_SINGLE(trie_map_btf_ids, struct, lpm_trie)
const struct bpf_map_ops trie_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = trie_alloc,
	.map_free = trie_free,
	.map_get_next_key = trie_get_next_key,
	.map_lookup_elem = trie_lookup_elem,
	.map_update_elem = trie_update_elem,
	.map_delete_elem = trie_delete_elem,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_delete_batch = generic_map_delete_batch,
	.map_check_btf = trie_check_btf,
	.map_mem_usage = trie_mem_usage,
	.map_btf_id = &trie_map_btf_ids[0],
};
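
/*
 * Illustrative userspace usage: a minimal sketch, not part of this file.
 * It assumes libbpf's bpf_map_create()/bpf_map_update_elem() wrappers, an
 * IPv4-sized key laid out like struct bpf_lpm_trie_key_u8, and an arbitrary
 * "lpm_example" map name.
 *
 *	struct {
 *		__u32 prefixlen;
 *		__u8  data[4];
 *	} key = { .prefixlen = 16, .data = { 192, 168, 0, 0 } };
 *	__u32 value = 1;
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
 *	int fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, "lpm_example",
 *				sizeof(key), sizeof(value), 255, &opts);
 *
 *	bpf_map_update_elem(fd, &key, &value, BPF_ANY);
 *
 *	key.prefixlen = 32;
 *	key.data[3] = 7;
 *	bpf_map_lookup_elem(fd, &key, &value);
 *
 * The lookup key 192.168.0.7/32 falls inside the stored 192.168.0.0/16
 * range, so bpf_map_lookup_elem() copies back the value 1.
 */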