// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <linux/zalloc.h>
#include "debug.h"
#include "dso.h"
#include "map.h"
#include "maps.h"
#include "rwsem.h"
#include "thread.h"
#include "ui/ui.h"
#include "unwind.h"
#include <internal/rc_check.h>

/*
 * Locking/sorting note:
 *
 * Sorting is done under the write lock; iteration and binary searching happen
 * under the read lock and require the maps to be sorted. There is a race
 * between sorting releasing the write lock and acquiring the read lock for
 * iteration/searching, where another thread could insert and break the sorting
 * of the maps. In practice inserting maps should be rare, meaning the race
 * shouldn't lead to live lock. Removal of maps doesn't break being sorted.
 */

DECLARE_RC_STRUCT(maps) {
	struct rw_semaphore lock;
	/**
	 * @maps_by_address: array of maps sorted by their starting address if
	 * maps_by_address_sorted is true.
	 */
	struct map **maps_by_address;
	/**
	 * @maps_by_name: optional array of maps sorted by their dso name if
	 * maps_by_name_sorted is true.
	 */
	struct map **maps_by_name;
	struct machine *machine;
#ifdef HAVE_LIBUNWIND_SUPPORT
	void *addr_space;
	const struct unwind_libunwind_ops *unwind_libunwind_ops;
#endif
	refcount_t refcnt;
	/**
	 * @nr_maps: number of maps_by_address, and possibly maps_by_name,
	 * entries that contain maps.
	 */
	unsigned int nr_maps;
	/**
	 * @nr_maps_allocated: number of entries in maps_by_address and
	 * possibly maps_by_name.
	 */
	unsigned int nr_maps_allocated;
	/**
	 * @last_search_by_name_idx: cache of the last found by name entry's
	 * index, as frequent searches for the same dso name are common.
	 */
	unsigned int last_search_by_name_idx;
	/** @maps_by_address_sorted: is maps_by_address sorted. */
	bool maps_by_address_sorted;
	/** @maps_by_name_sorted: is maps_by_name sorted. */
	bool maps_by_name_sorted;
	/** @ends_broken: does the maps contain a map whose end is unset/unsorted? */
	bool ends_broken;
};

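/*
 * Sanity check the maps invariants: array sizes, sortedness of
 * maps_by_address (including end ordering unless ends_broken), reference
 * counts and the kmap back pointers of kernel dsos. Compiled away when
 * NDEBUG is defined; callers below run it while holding the write lock.
 */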
static void check_invariants(const struct maps *maps __maybe_unused)
{
#ifndef NDEBUG
	assert(RC_CHK_ACCESS(maps)->nr_maps <= RC_CHK_ACCESS(maps)->nr_maps_allocated);
	for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
		struct map *map = RC_CHK_ACCESS(maps)->maps_by_address[i];

		/* Check map is well-formed. */
		assert(map__end(map) == 0 || map__start(map) <= map__end(map));
		/* Expect at least 1 reference count. */
		assert(refcount_read(map__refcnt(map)) > 0);

		if (map__dso(map) && dso__kernel(map__dso(map)))
			assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps));

		if (i > 0) {
			struct map *prev = RC_CHK_ACCESS(maps)->maps_by_address[i - 1];

			/* If addresses are sorted... */
			if (RC_CHK_ACCESS(maps)->maps_by_address_sorted) {
				/* Maps should be in start address order. */
				assert(map__start(prev) <= map__start(map));
				/*
				 * If the ends of maps aren't broken (during
				 * construction) then they should be ordered
				 * too.
				 */
				if (!RC_CHK_ACCESS(maps)->ends_broken) {
					assert(map__end(prev) <= map__end(map));
					assert(map__end(prev) <= map__start(map) ||
					       map__start(prev) == map__start(map));
				}
			}
		}
	}
	if (RC_CHK_ACCESS(maps)->maps_by_name) {
		for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
			struct map *map = RC_CHK_ACCESS(maps)->maps_by_name[i];

			/*
			 * Maps in maps_by_name should also be in
			 * maps_by_address, so the reference count should be
			 * higher than one.
			 */
			assert(refcount_read(map__refcnt(map)) > 1);
		}
	}
#endif
}

static struct map **maps__maps_by_address(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_address;
}

static void maps__set_maps_by_address(struct maps *maps, struct map **new)
{
	RC_CHK_ACCESS(maps)->maps_by_address = new;
}

static void maps__set_nr_maps_allocated(struct maps *maps, unsigned int nr_maps_allocated)
{
	RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_maps_allocated;
}

static void maps__set_nr_maps(struct maps *maps, unsigned int nr_maps)
{
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps;
}

/* Not in the header, to aid reference counting. */
static struct map **maps__maps_by_name(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_name;
}

static void maps__set_maps_by_name(struct maps *maps, struct map **new)
{
	RC_CHK_ACCESS(maps)->maps_by_name = new;
}

static bool maps__maps_by_address_sorted(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_address_sorted;
}

static void maps__set_maps_by_address_sorted(struct maps *maps, bool value)
{
	RC_CHK_ACCESS(maps)->maps_by_address_sorted = value;
}

static bool maps__maps_by_name_sorted(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_name_sorted;
}

static void maps__set_maps_by_name_sorted(struct maps *maps, bool value)
{
	RC_CHK_ACCESS(maps)->maps_by_name_sorted = value;
}

struct machine *maps__machine(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->machine;
}

unsigned int maps__nr_maps(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->nr_maps;
}

refcount_t *maps__refcnt(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->refcnt;
}

#ifdef HAVE_LIBUNWIND_SUPPORT
void *maps__addr_space(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->addr_space;
}

void maps__set_addr_space(struct maps *maps, void *addr_space)
{
	RC_CHK_ACCESS(maps)->addr_space = addr_space;
}

const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->unwind_libunwind_ops;
}

void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops)
{
	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops;
}
#endif

static struct rw_semaphore *maps__lock(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->lock;
}

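/*
 * Establish the initial, empty state: no arrays allocated, a single
 * reference held by the caller and, vacuously, sorted by address.
 */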
static void maps__init(struct maps *maps, struct machine *machine)
{
	init_rwsem(maps__lock(maps));
	RC_CHK_ACCESS(maps)->maps_by_address = NULL;
	RC_CHK_ACCESS(maps)->maps_by_name = NULL;
	RC_CHK_ACCESS(maps)->machine = machine;
#ifdef HAVE_LIBUNWIND_SUPPORT
	RC_CHK_ACCESS(maps)->addr_space = NULL;
	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = NULL;
#endif
	refcount_set(maps__refcnt(maps), 1);
	RC_CHK_ACCESS(maps)->nr_maps = 0;
	RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
	RC_CHK_ACCESS(maps)->last_search_by_name_idx = 0;
	RC_CHK_ACCESS(maps)->maps_by_address_sorted = true;
	RC_CHK_ACCESS(maps)->maps_by_name_sorted = false;
}

static void maps__exit(struct maps *maps)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);

	for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
		map__zput(maps_by_address[i]);
		if (maps_by_name)
			map__zput(maps_by_name[i]);
	}
	zfree(&maps_by_address);
	zfree(&maps_by_name);
	unwind__finish_access(maps);
}

struct maps *maps__new(struct machine *machine)
{
	struct maps *result;
	RC_STRUCT(maps) *maps = zalloc(sizeof(*maps));

	if (ADD_RC_CHK(result, maps))
		maps__init(result, machine);

	return result;
}

static void maps__delete(struct maps *maps)
{
	maps__exit(maps);
	RC_CHK_FREE(maps);
}

struct maps *maps__get(struct maps *maps)
{
	struct maps *result;

	if (RC_CHK_GET(result, maps))
		refcount_inc(maps__refcnt(maps));

	return result;
}

void maps__put(struct maps *maps)
{
	if (maps && refcount_dec_and_test(maps__refcnt(maps)))
		maps__delete(maps);
	else
		RC_CHK_PUT(maps);
}

static void __maps__free_maps_by_name(struct maps *maps)
{
	if (!maps__maps_by_name(maps))
		return;

	/*
	 * Drop all references; the array will be recomputed on the next
	 * by-name search.
	 */
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
		map__put(maps__maps_by_name(maps)[i]);

	zfree(&RC_CHK_ACCESS(maps)->maps_by_name);

	/* Consistent with maps__init(). When maps_by_name == NULL, maps_by_name_sorted == false. */
	maps__set_maps_by_name_sorted(maps, false);
}

static int map__start_cmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map * const *)a;
	const struct map *map_b = *(const struct map * const *)b;
	u64 map_a_start = map__start(map_a);
	u64 map_b_start = map__start(map_b);

	if (map_a_start == map_b_start) {
		u64 map_a_end = map__end(map_a);
		u64 map_b_end = map__end(map_b);

		if (map_a_end == map_b_end) {
			/* Ensure maps with the same addresses have a fixed order. */
			if (RC_CHK_ACCESS(map_a) == RC_CHK_ACCESS(map_b))
				return 0;
			return (intptr_t)RC_CHK_ACCESS(map_a) > (intptr_t)RC_CHK_ACCESS(map_b)
			       ? 1 : -1;
		}
		return map_a_end > map_b_end ? 1 : -1;
	}
	return map_a_start > map_b_start ? 1 : -1;
}

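/* Sort into start address order; the caller must hold the write lock. */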
static void __maps__sort_by_address(struct maps *maps)
{
	if (maps__maps_by_address_sorted(maps))
		return;

	qsort(maps__maps_by_address(maps),
	      maps__nr_maps(maps),
	      sizeof(struct map *),
	      map__start_cmp);
	maps__set_maps_by_address_sorted(maps, true);
}

static void maps__sort_by_address(struct maps *maps)
{
	down_write(maps__lock(maps));
	__maps__sort_by_address(maps);
	up_write(maps__lock(maps));
}

static int map__strcmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map * const *)a;
	const struct map *map_b = *(const struct map * const *)b;
	const struct dso *dso_a = map__dso(map_a);
	const struct dso *dso_b = map__dso(map_b);
	int ret = strcmp(dso__short_name(dso_a), dso__short_name(dso_b));

	if (ret == 0 && RC_CHK_ACCESS(map_a) != RC_CHK_ACCESS(map_b)) {
		/* Ensure distinct but name equal maps have an order. */
		return map__start_cmp(a, b);
	}
	return ret;
}

static int maps__sort_by_name(struct maps *maps)
{
	int err = 0;

	down_write(maps__lock(maps));
	if (!maps__maps_by_name_sorted(maps)) {
		struct map **maps_by_name = maps__maps_by_name(maps);

		if (!maps_by_name) {
			maps_by_name = malloc(RC_CHK_ACCESS(maps)->nr_maps_allocated *
					      sizeof(*maps_by_name));
			if (!maps_by_name)
				err = -ENOMEM;
			else {
				struct map **maps_by_address = maps__maps_by_address(maps);
				unsigned int n = maps__nr_maps(maps);

				maps__set_maps_by_name(maps, maps_by_name);
				for (unsigned int i = 0; i < n; i++)
					maps_by_name[i] = map__get(maps_by_address[i]);
			}
		}
		if (!err) {
			qsort(maps_by_name,
			      maps__nr_maps(maps),
			      sizeof(struct map *),
			      map__strcmp);
			maps__set_maps_by_name_sorted(maps, true);
		}
	}
	check_invariants(maps);
	up_write(maps__lock(maps));
	return err;
}

static unsigned int maps__by_address_index(const struct maps *maps, const struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);

	if (maps__maps_by_address_sorted(maps)) {
		struct map **mapp =
			bsearch(&map, maps__maps_by_address(maps), maps__nr_maps(maps),
				sizeof(*mapp), map__start_cmp);

		if (mapp)
			return mapp - maps_by_address;
	} else {
		for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
			if (RC_CHK_ACCESS(maps_by_address[i]) == RC_CHK_ACCESS(map))
				return i;
		}
	}
	pr_err("Map missing from maps\n");
	return -1;
}

static unsigned int maps__by_name_index(const struct maps *maps, const struct map *map)
{
	struct map **maps_by_name = maps__maps_by_name(maps);

	if (maps__maps_by_name_sorted(maps)) {
		struct map **mapp =
			bsearch(&map, maps_by_name, maps__nr_maps(maps),
				sizeof(*mapp), map__strcmp);

		if (mapp)
			return mapp - maps_by_name;
	} else {
		for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
			if (RC_CHK_ACCESS(maps_by_name[i]) == RC_CHK_ACCESS(map))
				return i;
		}
	}
	pr_err("Map missing from maps\n");
	return -1;
}

static void map__set_kmap_maps(struct map *map, struct maps *maps)
{
	struct dso *dso;

	if (map == NULL)
		return;

	dso = map__dso(map);

	if (dso && dso__kernel(dso)) {
		struct kmap *kmap = map__kmap(map);

		if (kmap)
			kmap->kmaps = maps;
		else
			pr_err("Internal error: kernel dso with non kernel map\n");
	}
}

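/*
 * Append new to maps_by_address (and maps_by_name when present), growing the
 * arrays as needed, taking a reference on new and recomputing whether the
 * arrays remain sorted. The caller must hold the write lock.
 */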
static int __maps__insert(struct maps *maps, struct map *new)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;

	if (nr_maps + 1 > nr_allocate) {
		nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;

		maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new));
		if (!maps_by_address)
			return -ENOMEM;

		maps__set_maps_by_address(maps, maps_by_address);
		if (maps_by_name) {
			maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new));
			if (!maps_by_name) {
				/*
				 * If by name fails, just disable by name and it will
				 * recompute next time it is required.
				 */
				__maps__free_maps_by_name(maps);
			}
			maps__set_maps_by_name(maps, maps_by_name);
		}
		RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
	}
	/* Insert the value at the end. */
	maps_by_address[nr_maps] = map__get(new);
	if (maps_by_name)
		maps_by_name[nr_maps] = map__get(new);

	nr_maps++;
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps;

	/*
	 * Recompute if things are sorted. If things are inserted in a sorted
	 * manner, for example by processing /proc/pid/maps, then no
	 * sorting/resorting will be necessary.
	 */
	if (nr_maps == 1) {
		/* If there's just 1 entry then maps are sorted. */
		maps__set_maps_by_address_sorted(maps, true);
		maps__set_maps_by_name_sorted(maps, maps_by_name != NULL);
	} else {
		/* Sorted if maps were already sorted and this map starts after the last one. */
		maps__set_maps_by_address_sorted(maps,
			maps__maps_by_address_sorted(maps) &&
			map__end(maps_by_address[nr_maps - 2]) <= map__start(new));
		maps__set_maps_by_name_sorted(maps, false);
	}
	if (map__end(new) < map__start(new))
		RC_CHK_ACCESS(maps)->ends_broken = true;

	map__set_kmap_maps(new, maps);

	return 0;
}

int maps__insert(struct maps *maps, struct map *map)
{
	int ret;

	down_write(maps__lock(maps));
	ret = __maps__insert(maps, map);
	check_invariants(maps);
	up_write(maps__lock(maps));
	return ret;
}

static void __maps__remove(struct maps *maps, struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int address_idx;

	/* Slide later mappings over the one to remove. */
	address_idx = maps__by_address_index(maps, map);
	map__put(maps_by_address[address_idx]);
	memmove(&maps_by_address[address_idx],
		&maps_by_address[address_idx + 1],
		(nr_maps - address_idx - 1) * sizeof(*maps_by_address));

	if (maps_by_name) {
		unsigned int name_idx = maps__by_name_index(maps, map);

		map__put(maps_by_name[name_idx]);
		memmove(&maps_by_name[name_idx],
			&maps_by_name[name_idx + 1],
			(nr_maps - name_idx - 1) * sizeof(*maps_by_name));
	}

	--RC_CHK_ACCESS(maps)->nr_maps;
}

void maps__remove(struct maps *maps, struct map *map)
{
	down_write(maps__lock(maps));
	__maps__remove(maps, map);
	check_invariants(maps);
	up_write(maps__lock(maps));
}

bool maps__empty(struct maps *maps)
{
	bool res;

	down_read(maps__lock(maps));
	res = maps__nr_maps(maps) == 0;
	up_read(maps__lock(maps));

	return res;
}

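/* Identity test: two maps are equal only when they are the same object. */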
bool maps__equal(struct maps *a, struct maps *b)
{
	return RC_CHK_EQUAL(a, b);
}

int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
{
	bool done = false;
	int ret = 0;

	/* See locking/sorting note. */
	while (!done) {
		down_read(maps__lock(maps));
		if (maps__maps_by_address_sorted(maps)) {
			/*
			 * maps__for_each_map callbacks may buggily/unsafely
			 * insert into maps_by_address. Deliberately reload
			 * maps__nr_maps and maps_by_address on each iteration
			 * to avoid using memory freed when maps__insert grows
			 * the array; note this may cause maps to be skipped
			 * or repeated.
			 */
			for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
				struct map **maps_by_address = maps__maps_by_address(maps);
				struct map *map = maps_by_address[i];

				ret = cb(map, data);
				if (ret)
					break;
			}
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done)
			maps__sort_by_address(maps);
	}
	return ret;
}

void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
{
	struct map **maps_by_address;

	down_write(maps__lock(maps));

	maps_by_address = maps__maps_by_address(maps);
	for (unsigned int i = 0; i < maps__nr_maps(maps);) {
		if (cb(maps_by_address[i], data))
			__maps__remove(maps, maps_by_address[i]);
		else
			i++;
	}
	check_invariants(maps);
	up_write(maps__lock(maps));
}

struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
	struct map *map = maps__find(maps, addr);
	struct symbol *result = NULL;

	/* Ensure map is loaded before using map->map_ip. */
	if (map != NULL && map__load(map) >= 0)
		result = map__find_symbol(map, map__map_ip(map, addr));

	if (mapp)
		*mapp = map;
	else
		map__put(map);

	return result;
}

struct maps__find_symbol_by_name_args {
	struct map **mapp;
	const char *name;
	struct symbol *sym;
};

static int maps__find_symbol_by_name_cb(struct map *map, void *data)
{
	struct maps__find_symbol_by_name_args *args = data;

	args->sym = map__find_symbol_by_name(map, args->name);
	if (!args->sym)
		return 0;

	if (!map__contains_symbol(map, args->sym)) {
		args->sym = NULL;
		return 0;
	}

	if (args->mapp != NULL)
		*args->mapp = map__get(map);
	return 1;
}

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
{
	struct maps__find_symbol_by_name_args args = {
		.mapp = mapp,
		.name = name,
		.sym = NULL,
	};

	maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args);
	return args.sym;
}

int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
	if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
		if (maps == NULL)
			return -1;
		ams->ms.map = maps__find(maps, ams->addr);
		if (ams->ms.map == NULL)
			return -1;
	}

	ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);

	return ams->ms.sym ? 0 : -1;
}

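/*
 * Callback state for maps__fprintf(): accumulates the character counts
 * returned by the fprintf calls across the maps__for_each_map() iteration.
 */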
struct maps__fprintf_args {
	FILE *fp;
	size_t printed;
};

static int maps__fprintf_cb(struct map *map, void *data)
{
	struct maps__fprintf_args *args = data;

	args->printed += fprintf(args->fp, "Map:");
	args->printed += map__fprintf(map, args->fp);
	if (verbose > 2) {
		args->printed += dso__fprintf(map__dso(map), args->fp);
		args->printed += fprintf(args->fp, "--\n");
	}
	return 0;
}

size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	struct maps__fprintf_args args = {
		.fp = fp,
		.printed = 0,
	};

	maps__for_each_map(maps, maps__fprintf_cb, &args);

	return args.printed;
}

/*
 * Find first map where end > map->start.
 * Same as find_vma() in kernel.
 */
static unsigned int first_ending_after(struct maps *maps, const struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	int low = 0, high = (int)maps__nr_maps(maps) - 1, first = high + 1;

	assert(maps__maps_by_address_sorted(maps));
	if (low <= high && map__end(maps_by_address[0]) > map__start(map))
		return 0;

	while (low <= high) {
		int mid = (low + high) / 2;
		struct map *pos = maps_by_address[mid];

		if (map__end(pos) > map__start(map)) {
			first = mid;
			if (map__start(pos) <= map__start(map)) {
				/* Entry overlaps map. */
				break;
			}
			high = mid - 1;
		} else
			low = mid + 1;
	}
	return first;
}

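/*
 * Insert new1, and optionally new2, at first_after_index while keeping
 * maps_by_address sorted. The caller must hold the write lock and, per the
 * asserts, guarantee that the new maps end before the entry currently at
 * first_after_index begins.
 */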
static int __maps__insert_sorted(struct maps *maps, unsigned int first_after_index,
				 struct map *new1, struct map *new2)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;
	unsigned int to_add = new2 ? 2 : 1;

	assert(maps__maps_by_address_sorted(maps));
	assert(first_after_index == nr_maps ||
	       map__end(new1) <= map__start(maps_by_address[first_after_index]));
	assert(!new2 || map__end(new1) <= map__start(new2));
	assert(first_after_index == nr_maps || !new2 ||
	       map__end(new2) <= map__start(maps_by_address[first_after_index]));

	if (nr_maps + to_add > nr_allocate) {
		nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;

		maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new1));
		if (!maps_by_address)
			return -ENOMEM;

		maps__set_maps_by_address(maps, maps_by_address);
		if (maps_by_name) {
			maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new1));
			if (!maps_by_name) {
				/*
				 * If by name fails, just disable by name and it will
				 * recompute next time it is required.
				 */
				__maps__free_maps_by_name(maps);
			}
			maps__set_maps_by_name(maps, maps_by_name);
		}
		RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
	}
	memmove(&maps_by_address[first_after_index + to_add],
		&maps_by_address[first_after_index],
		(nr_maps - first_after_index) * sizeof(new1));
	maps_by_address[first_after_index] = map__get(new1);
	if (maps_by_name)
		maps_by_name[nr_maps] = map__get(new1);
	if (new2) {
		maps_by_address[first_after_index + 1] = map__get(new2);
		if (maps_by_name)
			maps_by_name[nr_maps + 1] = map__get(new2);
	}
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps + to_add;
	maps__set_maps_by_name_sorted(maps, false);
	map__set_kmap_maps(new1, maps);
	map__set_kmap_maps(new2, maps);

	check_invariants(maps);
	return 0;
}

/*
 * Adds new to maps, if new overlaps existing entries then the existing maps are
 * adjusted or removed so that new fits without overlapping any entries.
 */
static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
	int err = 0;
	FILE *fp = debug_file();
	unsigned int i;
	/* Initialized to silence a GCC warning; only used when maps_by_name is set. */
	unsigned int ni = INT_MAX;

	if (!maps__maps_by_address_sorted(maps))
		__maps__sort_by_address(maps);

	/*
	 * Iterate through entries where the end of the existing entry is
	 * greater than the new map's start.
	 */
	for (i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) {
		struct map **maps_by_address = maps__maps_by_address(maps);
		struct map **maps_by_name = maps__maps_by_name(maps);
		struct map *pos = maps_by_address[i];
		struct map *before = NULL, *after = NULL;

		/*
		 * Stop if the current map starts after new's end.
		 * Maps are ordered by start: the next ones will not overlap for sure.
		 */
		if (map__start(pos) >= map__end(new))
			break;

		if (use_browser) {
			pr_debug("overlapping maps in %s (disable tui for more info)\n",
				 dso__name(map__dso(new)));
		} else if (verbose >= 2) {
			pr_debug("overlapping maps:\n");
			map__fprintf(new, fp);
			map__fprintf(pos, fp);
		}

		if (maps_by_name)
			ni = maps__by_name_index(maps, pos);

		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map__start(new) > map__start(pos)) {
			/* Map starts within existing map. Need to shorten the existing map. */
			before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto out_err;
			}
			map__set_end(before, map__start(new));

			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
		}
		if (map__end(new) < map__end(pos)) {
			/* The new map isn't as long as the existing map. */
			after = map__clone(pos);

			if (after == NULL) {
				map__zput(before);
				err = -ENOMEM;
				goto out_err;
			}

			map__set_start(after, map__end(new));
			map__add_pgoff(after, map__end(new) - map__start(pos));
			assert(map__map_ip(pos, map__end(new)) ==
			       map__map_ip(after, map__end(new)));

			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
		}
		/*
		 * If adding one entry, for `before` or `after`, we can replace
		 * the existing entry. If both `before` and `after` are
		 * necessary then an insert is needed. If the new entry
		 * entirely overlaps the existing entry it can just be removed.
		 */
		if (before) {
			map__put(maps_by_address[i]);
			maps_by_address[i] = before;

			if (maps_by_name) {
				map__put(maps_by_name[ni]);
				maps_by_name[ni] = map__get(before);
			}

			/* Maps are still ordered, go to next one. */
			i++;
			if (after) {
				/*
				 * 'before' and 'after' mean 'new' split the
				 * 'pos' mapping and therefore there are no
				 * later mappings.
				 */
				err = __maps__insert_sorted(maps, i, new, after);
				map__put(after);
				check_invariants(maps);
				return err;
			}
			check_invariants(maps);
		} else if (after) {
			/*
			 * 'after' means 'new' split 'pos' and there are no
			 * later mappings.
			 */
			map__put(maps_by_address[i]);
			maps_by_address[i] = map__get(new);

			if (maps_by_name) {
				map__put(maps_by_name[ni]);
				maps_by_name[ni] = map__get(new);
			}

			err = __maps__insert_sorted(maps, i + 1, after, NULL);
			map__put(after);
			check_invariants(maps);
			return err;
		} else {
			struct map *next = NULL;

			if (i + 1 < maps__nr_maps(maps))
				next = maps_by_address[i + 1];

			if (!next || map__start(next) >= map__end(new)) {
				/*
				 * Replace the existing mapping and finish,
				 * knowing no later mappings overlap new.
				 */
				map__put(maps_by_address[i]);
				maps_by_address[i] = map__get(new);

				if (maps_by_name) {
					map__put(maps_by_name[ni]);
					maps_by_name[ni] = map__get(new);
				}

				map__set_kmap_maps(new, maps);

				check_invariants(maps);
				return err;
			}
			__maps__remove(maps, pos);
			check_invariants(maps);
			/*
			 * Maps are ordered but no need to increase `i` as the
			 * later maps were moved down.
			 */
		}
	}
	/* Add the map. */
	err = __maps__insert_sorted(maps, i, new, NULL);
out_err:
	return err;
}

int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
	int err;

	down_write(maps__lock(maps));
	err = __maps__fixup_overlap_and_insert(maps, new);
	up_write(maps__lock(maps));
	return err;
}

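/*
 * Clone parent's maps into dest, as when a forked thread inherits its
 * parent's address space. When dest is empty the arrays are copied wholesale
 * to avoid reallocations; otherwise each cloned map falls back to a regular
 * insert.
 */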
int maps__copy_from(struct maps *dest, struct maps *parent)
{
	/* Note, if struct map were immutable then cloning could use ref counts. */
	struct map **parent_maps_by_address;
	int err = 0;
	unsigned int n;

	down_write(maps__lock(dest));
	down_read(maps__lock(parent));

	parent_maps_by_address = maps__maps_by_address(parent);
	n = maps__nr_maps(parent);
	if (maps__nr_maps(dest) == 0) {
		/* No existing mappings, so just copy from parent to avoid reallocs in insert. */
		unsigned int nr_maps_allocated = RC_CHK_ACCESS(parent)->nr_maps_allocated;
		struct map **dest_maps_by_address =
			malloc(nr_maps_allocated * sizeof(struct map *));
		struct map **dest_maps_by_name = NULL;

		if (!dest_maps_by_address)
			err = -ENOMEM;
		else {
			if (maps__maps_by_name(parent)) {
				dest_maps_by_name =
					malloc(nr_maps_allocated * sizeof(struct map *));
			}

			RC_CHK_ACCESS(dest)->maps_by_address = dest_maps_by_address;
			RC_CHK_ACCESS(dest)->maps_by_name = dest_maps_by_name;
			RC_CHK_ACCESS(dest)->nr_maps_allocated = nr_maps_allocated;
		}

		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

			if (!new)
				err = -ENOMEM;
			else {
				err = unwind__prepare_access(dest, new, NULL);
				if (!err) {
					dest_maps_by_address[i] = new;
					if (dest_maps_by_name)
						dest_maps_by_name[i] = map__get(new);
					RC_CHK_ACCESS(dest)->nr_maps = i + 1;
				}
			}
			if (err)
				map__put(new);
		}
		maps__set_maps_by_address_sorted(dest, maps__maps_by_address_sorted(parent));
		if (!err) {
			RC_CHK_ACCESS(dest)->last_search_by_name_idx =
				RC_CHK_ACCESS(parent)->last_search_by_name_idx;
			maps__set_maps_by_name_sorted(dest,
						      dest_maps_by_name &&
						      maps__maps_by_name_sorted(parent));
		} else {
			RC_CHK_ACCESS(dest)->last_search_by_name_idx = 0;
			maps__set_maps_by_name_sorted(dest, false);
		}
	} else {
		/* Unexpected copying to a maps containing entries. */
		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

			if (!new)
				err = -ENOMEM;
			else {
				err = unwind__prepare_access(dest, new, NULL);
				if (!err)
					err = __maps__insert(dest, new);
			}
			map__put(new);
		}
	}
	check_invariants(dest);

	up_read(maps__lock(parent));
	up_write(maps__lock(dest));
	return err;
}

static int map__addr_cmp(const void *key, const void *entry)
{
	const u64 ip = *(const u64 *)key;
	const struct map *map = *(const struct map * const *)entry;

	if (ip < map__start(map))
		return -1;
	if (ip >= map__end(map))
		return 1;
	return 0;
}

struct map *maps__find(struct maps *maps, u64 ip)
{
	struct map *result = NULL;
	bool done = false;

	/* See locking/sorting note. */
	while (!done) {
		down_read(maps__lock(maps));
		if (maps__maps_by_address_sorted(maps)) {
			struct map **mapp = NULL;
			struct map **maps_by_address = maps__maps_by_address(maps);
			unsigned int nr_maps = maps__nr_maps(maps);

			if (maps_by_address && nr_maps)
				mapp = bsearch(&ip, maps_by_address, nr_maps, sizeof(*mapp),
					       map__addr_cmp);
			if (mapp)
				result = map__get(*mapp);
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done)
			maps__sort_by_address(maps);
	}
	return result;
}

static int map__strcmp_name(const void *name, const void *b)
{
	const struct dso *dso = map__dso(*(const struct map **)b);

	return strcmp(name, dso__short_name(dso));
}

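/*
 * Find a map by its dso short name. Tries, in order: the cached
 * last_search_by_name_idx entry, a binary search of the name-sorted array
 * and, should allocating that array fail, a linear scan of maps_by_address.
 * Returns a new reference that the caller must drop, e.g. (a sketch, with a
 * hypothetical name):
 *
 *	struct map *map = maps__find_by_name(maps, "libc.so.6");
 *
 *	if (map != NULL) {
 *		...
 *		map__put(map);
 *	}
 */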
struct map *maps__find_by_name(struct maps *maps, const char *name)
{
	struct map *result = NULL;
	bool done = false;

	/* See locking/sorting note. */
	while (!done) {
		unsigned int i;

		down_read(maps__lock(maps));

		/* First check last found entry. */
		i = RC_CHK_ACCESS(maps)->last_search_by_name_idx;
		if (i < maps__nr_maps(maps) && maps__maps_by_name(maps)) {
			struct dso *dso = map__dso(maps__maps_by_name(maps)[i]);

			if (dso && strcmp(dso__short_name(dso), name) == 0) {
				result = map__get(maps__maps_by_name(maps)[i]);
				done = true;
			}
		}

		/* Second, search the sorted array. */
		if (!done && maps__maps_by_name_sorted(maps)) {
			struct map **mapp =
				bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
					sizeof(*mapp), map__strcmp_name);

			if (mapp) {
				result = map__get(*mapp);
				i = mapp - maps__maps_by_name(maps);
				RC_CHK_ACCESS(maps)->last_search_by_name_idx = i;
			}
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done) {
			/* Sort and retry the binary search. */
			if (maps__sort_by_name(maps)) {
				/*
				 * Memory allocation failed; do a linear search
				 * through the address-sorted maps.
				 */
				struct map **maps_by_address;
				unsigned int n;

				down_read(maps__lock(maps));
				maps_by_address = maps__maps_by_address(maps);
				n = maps__nr_maps(maps);
				for (i = 0; i < n; i++) {
					struct map *pos = maps_by_address[i];
					struct dso *dso = map__dso(pos);

					if (dso && strcmp(dso__short_name(dso), name) == 0) {
						result = map__get(pos);
						break;
					}
				}
				up_read(maps__lock(maps));
				done = true;
			}
		}
	}
	return result;
}

struct map *maps__find_next_entry(struct maps *maps, struct map *map)
{
	unsigned int i;
	struct map *result = NULL;

	down_read(maps__lock(maps));
	while (!maps__maps_by_address_sorted(maps)) {
		up_read(maps__lock(maps));
		maps__sort_by_address(maps);
		down_read(maps__lock(maps));
	}
	i = maps__by_address_index(maps, map);
	if (++i < maps__nr_maps(maps))
		result = map__get(maps__maps_by_address(maps)[i]);

	up_read(maps__lock(maps));
	return result;
}

void maps__fixup_end(struct maps *maps)
{
	struct map **maps_by_address;
	unsigned int n;

	down_write(maps__lock(maps));
	if (!maps__maps_by_address_sorted(maps))
		__maps__sort_by_address(maps);

	maps_by_address = maps__maps_by_address(maps);
	n = maps__nr_maps(maps);
	for (unsigned int i = 1; i < n; i++) {
		struct map *prev = maps_by_address[i - 1];
		struct map *curr = maps_by_address[i];

		if (!map__end(prev) || map__end(prev) > map__start(curr))
			map__set_end(prev, map__start(curr));
	}

	/*
	 * We still don't have the actual symbols, so guess the
	 * last map's final address.
	 */
	if (n > 0 && !map__end(maps_by_address[n - 1]))
		map__set_end(maps_by_address[n - 1], ~0ULL);

	RC_CHK_ACCESS(maps)->ends_broken = false;
	check_invariants(maps);

	up_write(maps__lock(maps));
}

/*
 * Merges map into maps by splitting the new map within the existing map
 * regions.
 */
int maps__merge_in(struct maps *kmaps, struct map *new_map)
{
	unsigned int first_after_, kmaps__nr_maps;
	struct map **kmaps_maps_by_address;
	struct map **merged_maps_by_address;
	unsigned int merged_nr_maps_allocated;

	/* First try under a read lock. */
	while (true) {
		down_read(maps__lock(kmaps));
		if (maps__maps_by_address_sorted(kmaps))
			break;

		up_read(maps__lock(kmaps));

		/* The first_ending_after() binary search requires sorted maps. Sort and try again. */
		maps__sort_by_address(kmaps);
	}
	first_after_ = first_ending_after(kmaps, new_map);
	kmaps_maps_by_address = maps__maps_by_address(kmaps);

	if (first_after_ >= maps__nr_maps(kmaps) ||
	    map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
		/* No overlap so regular insert suffices. */
		up_read(maps__lock(kmaps));
		return maps__insert(kmaps, new_map);
	}
	up_read(maps__lock(kmaps));

	/* Plain insert with a read-lock failed, try again now with the write lock. */
	down_write(maps__lock(kmaps));
	if (!maps__maps_by_address_sorted(kmaps))
		__maps__sort_by_address(kmaps);

	first_after_ = first_ending_after(kmaps, new_map);
	kmaps_maps_by_address = maps__maps_by_address(kmaps);
	kmaps__nr_maps = maps__nr_maps(kmaps);

	if (first_after_ >= kmaps__nr_maps ||
	    map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
		/* No overlap so regular insert suffices. */
		int ret = __maps__insert(kmaps, new_map);

		check_invariants(kmaps);
		up_write(maps__lock(kmaps));
		return ret;
	}
	/* Array to merge into, possibly 1 more for the sake of new_map. */
	merged_nr_maps_allocated = RC_CHK_ACCESS(kmaps)->nr_maps_allocated;
	if (kmaps__nr_maps + 1 == merged_nr_maps_allocated)
		merged_nr_maps_allocated++;

	merged_maps_by_address = malloc(merged_nr_maps_allocated * sizeof(*merged_maps_by_address));
	if (!merged_maps_by_address) {
		up_write(maps__lock(kmaps));
		return -ENOMEM;
	}
	maps__set_maps_by_address(kmaps, merged_maps_by_address);
	maps__set_maps_by_address_sorted(kmaps, true);
	__maps__free_maps_by_name(kmaps);
	maps__set_nr_maps_allocated(kmaps, merged_nr_maps_allocated);

	/* Copy entries before the new_map that can't overlap. */
	for (unsigned int i = 0; i < first_after_; i++)
		merged_maps_by_address[i] = map__get(kmaps_maps_by_address[i]);

	maps__set_nr_maps(kmaps, first_after_);

	/* Add the new map, it will be split when the later overlapping mappings are added. */
	__maps__insert(kmaps, new_map);

	/* Insert mappings after new_map, splitting new_map in the process. */
	for (unsigned int i = first_after_; i < kmaps__nr_maps; i++)
		__maps__fixup_overlap_and_insert(kmaps, kmaps_maps_by_address[i]);

	/* Release the old array's references and free it. */
	for (unsigned int i = 0; i < kmaps__nr_maps; i++)
		map__zput(kmaps_maps_by_address[i]);

	free(kmaps_maps_by_address);
	check_invariants(kmaps);
	up_write(maps__lock(kmaps));
	return 0;
}

void maps__load_first(struct maps *maps)
{
	down_read(maps__lock(maps));

	if (maps__nr_maps(maps) > 0)
		map__load(maps__maps_by_address(maps)[0]);

	up_read(maps__lock(maps));
}