// SPDX-License-Identifier: GPL-2.0+
/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 */

#define pr_fmt(fmt)	"OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>

#include "of_private.h"

static struct reserved_mem reserved_mem_array[MAX_RESERVED_REGIONS] __initdata;
static struct reserved_mem *reserved_mem __refdata = reserved_mem_array;
static int total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
static int reserved_mem_count;

static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;
	int err = 0;

	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	align = !align ? SMP_CACHE_BYTES : align;
	base = memblock_phys_alloc_range(size, align, start, end);
	if (!base)
		return -ENOMEM;

	*res_base = base;
	if (nomap) {
		err = memblock_mark_nomap(base, size);
		if (err)
			memblock_phys_free(base, size);
	}

	if (!err)
		kmemleak_ignore_phys(base);

	return err;
}

/*
 * alloc_reserved_mem_array() - allocate memory for the reserved_mem
 * array using memblock
 *
 * This function is used to allocate memory for the reserved_mem
 * array according to the total number of reserved memory regions
 * defined in the DT.
 * After the new array is allocated, the information stored in
 * the initial static array is copied over to this new array and
 * the new array is used from this point on.
 */
static void __init alloc_reserved_mem_array(void)
{
	struct reserved_mem *new_array;
	size_t alloc_size, copy_size, memset_size;

	alloc_size = array_size(total_reserved_mem_cnt, sizeof(*new_array));
	if (alloc_size == SIZE_MAX) {
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
		return;
	}

	new_array = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!new_array) {
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -ENOMEM);
		return;
	}

	copy_size = array_size(reserved_mem_count, sizeof(*new_array));
	if (copy_size == SIZE_MAX) {
		memblock_free(new_array, alloc_size);
		total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
		return;
	}

	memset_size = alloc_size - copy_size;

	memcpy(new_array, reserved_mem, copy_size);
	memset(new_array + reserved_mem_count, 0, memset_size);

	reserved_mem = new_array;
}

static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem);
/*
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
static void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
					      phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == total_reserved_mem_cnt) {
		pr_err("not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	/* Call the region specific initialization function */
	fdt_init_reserved_mem_node(rmem);

	reserved_mem_count++;
}

static int __init early_init_dt_reserve_memory(phys_addr_t base,
					       phys_addr_t size, bool nomap)
{
	if (nomap) {
		/*
		 * If the memory is already reserved (by another region), we
		 * should not allow it to be marked nomap, but don't worry
		 * if the region isn't memory as it won't be mapped.
		 */
		if (memblock_overlaps_region(&memblock.memory, base, size) &&
		    memblock_is_region_reserved(base, size))
			return -EBUSY;

		return memblock_mark_nomap(base, size);
	}
	return memblock_reserve(base, size);
}

/*
 * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
 */
static int __init __reserved_mem_reserve_reg(unsigned long node,
					     const char *uname)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t base, size;
	int len;
	const __be32 *prop;
	bool nomap;

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop)
		return -ENOENT;

	if (len && len % t_len != 0) {
		pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
		       uname);
		return -EINVAL;
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	while (len >= t_len) {
		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		size = dt_mem_next_cell(dt_root_size_cells, &prop);

		if (size &&
		    early_init_dt_reserve_memory(base, size, nomap) == 0)
			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
		else
			pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
			       uname, &base, (unsigned long)(size / SZ_1M));

		len -= t_len;
	}
	return 0;
}

/*
 * __reserved_mem_check_root() - check that the #size-cells and #address-cells
 * provided in /reserved-memory match the values supported by the current
 * implementation, and that a 'ranges' property has been provided
 */
static int __init __reserved_mem_check_root(unsigned long node)
{
	const __be32 *prop;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "ranges", NULL);
	if (!prop)
		return -EINVAL;
	return 0;
}

static void __init __rmem_check_for_overlap(void);

/**
 * fdt_scan_reserved_mem_reg_nodes() - Store info for the "reg" defined
 * reserved memory regions.
 *
 * This function is used to scan through the DT and store the
 * information for the reserved memory regions that are defined using
 * the "reg" property. The region node number, name, base address, and
 * size are all stored in the reserved_mem array by calling the
 * fdt_reserved_mem_save_node() function.
 */
void __init fdt_scan_reserved_mem_reg_nodes(void)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	const void *fdt = initial_boot_params;
	phys_addr_t base, size;
	const __be32 *prop;
	int node, child;
	int len;

	if (!fdt)
		return;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0) {
		pr_info("Reserved memory: No reserved-memory node in the DT\n");
		return;
	}

	/* Attempt dynamic allocation of a new reserved_mem array */
	alloc_reserved_mem_array();

	if (__reserved_mem_check_root(node)) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;

		prop = of_get_flat_dt_prop(child, "reg", &len);
		if (!prop)
			continue;
		if (!of_fdt_device_is_available(fdt, child))
			continue;

		uname = fdt_get_name(fdt, child, NULL);
		if (len && len % t_len != 0) {
			pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
			       uname);
			continue;
		}

		if (len > t_len)
			pr_warn("%s() ignores %d regions in node '%s'\n",
				__func__, len / t_len - 1, uname);

		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		size = dt_mem_next_cell(dt_root_size_cells, &prop);

		if (size)
			fdt_reserved_mem_save_node(child, uname, base, size);
	}

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();
}

static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname);

/*
 * fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
 */
int __init fdt_scan_reserved_mem(void)
{
	int node, child;
	int dynamic_nodes_cnt = 0, count = 0;
	int dynamic_nodes[MAX_RESERVED_REGIONS];
	const void *fdt = initial_boot_params;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0)
		return -ENODEV;

	if (__reserved_mem_check_root(node) != 0) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return -EINVAL;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;
		int err;

		if (!of_fdt_device_is_available(fdt, child))
			continue;

		uname = fdt_get_name(fdt, child, NULL);

		err = __reserved_mem_reserve_reg(child, uname);
		if (!err)
			count++;
		/*
		 * Save the nodes for the dynamically-placed regions
		 * into an array which will be used for allocation right
		 * after all the statically-placed regions are reserved
		 * or marked as no-map. This is done to avoid dynamically
		 * allocating from one of the statically-placed regions.
		 */
		if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL)) {
			dynamic_nodes[dynamic_nodes_cnt] = child;
			dynamic_nodes_cnt++;
		}
	}
	for (int i = 0; i < dynamic_nodes_cnt; i++) {
		const char *uname;
		int err;

		child = dynamic_nodes[i];
		uname = fdt_get_name(fdt, child, NULL);
		err = __reserved_mem_alloc_size(child, uname);
		if (!err)
			count++;
	}
	total_reserved_mem_cnt = count;
	return 0;
}

/*
 * __reserved_mem_alloc_in_range() - allocate reserved memory described with
 *	'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
 *	reserved regions to keep the reserved memory contiguous if possible.
 */
static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	bool prev_bottom_up = memblock_bottom_up();
	bool bottom_up = false, top_down = false;
	int ret, i;

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];

		/* Skip regions that were not reserved yet */
		if (rmem->size == 0)
			continue;

		/*
		 * If range starts next to an existing reservation, use bottom-up:
		 *	|....RRRR................RRRRRRRR..............|
		 *	       --RRRR------
		 */
		if (start >= rmem->base && start <= (rmem->base + rmem->size))
			bottom_up = true;

		/*
		 * If range ends next to an existing reservation, use top-down:
		 *	|....RRRR................RRRRRRRR..............|
		 *	                 -------RRRR-----
		 */
		if (end >= rmem->base && end <= (rmem->base + rmem->size))
			top_down = true;
	}

	/* Change setting only if either bottom-up or top-down was selected */
	if (bottom_up != top_down)
		memblock_set_bottom_up(bottom_up);

	ret = early_init_dt_alloc_reserved_memory_arch(size, align,
						       start, end, nomap, res_base);

	/* Restore old setting if needed */
	if (bottom_up != top_down)
		memblock_set_bottom_up(prev_bottom_up);

	return ret;
}

/*
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'alignment' and 'alloc-ranges' properties.
 */
static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int len;
	const __be32 *prop;
	bool nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_size_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
			       uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_size_cells, &prop);
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	/* Need to adjust the alignment to satisfy the CMA requirement */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !nomap)
		align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);

	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	if (prop) {

		if (len % t_len != 0) {
			pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
			       uname);
			return -EINVAL;
		}

		while (len > 0) {
			start = dt_mem_next_cell(dt_root_addr_cells, &prop);
			end = start + dt_mem_next_cell(dt_root_size_cells,
						       &prop);

			base = 0;
			ret = __reserved_mem_alloc_in_range(size, align,
					start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
					 uname, &base,
					 (unsigned long)(size / SZ_1M));
				break;
			}
			len -= t_len;
		}

	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
							       0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
	}

	if (base == 0) {
		pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
		       uname, (unsigned long)(size / SZ_1M));
		return -ENOMEM;
	}

	/* Save region in the reserved_mem array */
	fdt_reserved_mem_save_node(node, uname, base, size);
	return 0;
}

static const struct of_device_id __rmem_of_table_sentinel
	__used __section("__reservedmem_of_table_end");

/*
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;
	int ret = -ENOENT;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		ret = initfn(rmem);
		if (ret == 0) {
			pr_info("initialized node %s, compatible id %s\n",
				rmem->name, compat);
			break;
		}
	}
	return ret;
}

static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	/*
	 * Put the dynamic allocations (address == 0, size == 0) before static
	 * allocations at address 0x0 so that overlap detection works
	 * correctly.
	 */
	if (ra->size < rb->size)
		return -1;
	if (ra->size > rb->size)
		return 1;

	if (ra->fdt_node < rb->fdt_node)
		return -1;
	if (ra->fdt_node > rb->fdt_node)
		return 1;

	return 0;
}

static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];

		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;
			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}

/**
 * fdt_init_reserved_mem_node() - Initialize a reserved memory region
 * @rmem: reserved_mem struct of the memory region to be initialized.
 *
 * This function is used to call the region specific initialization
 * function for a reserved memory region.
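 *
 * If the region specific initialization fails with an error other than
 * -ENOENT, the reservation made for @rmem is undone again: the nomap
 * attribute is cleared, or the memory is freed back to memblock.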
 */
static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;
	int err = 0;
	bool nomap;

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	err = __reserved_mem_init_node(rmem);
	if (err != 0 && err != -ENOENT) {
		pr_info("node %s compatible matching fail\n", rmem->name);
		if (nomap)
			memblock_clear_nomap(rmem->base, rmem->size);
		else
			memblock_phys_free(rmem->base, rmem->size);
	} else {
		phys_addr_t end = rmem->base + rmem->size - 1;
		bool reusable =
			(of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;

		pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
			&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
			nomap ? "nomap" : "map",
			reusable ? "reusable" : "non-reusable",
			rmem->name ? rmem->name : "unknown");
	}
}

struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);

/**
 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
 *					  given device
 * @dev:	Pointer to the device to configure
 * @np:		Pointer to the device_node with 'memory-region' property
 * @idx:	Index of selected region
 *
 * This function assigns respective DMA-mapping operations based on the reserved
 * memory region specified by the 'memory-region' property in the @np node to
 * the @dev device. When a driver needs to use more than one reserved memory
 * region, it should allocate child devices and initialize regions by name for
 * each child device.
 *
 * Returns error code or zero on success.
 */
int of_reserved_mem_device_init_by_idx(struct device *dev,
				       struct device_node *np, int idx)
{
	struct rmem_assigned_device *rd;
	struct device_node *target;
	struct reserved_mem *rmem;
	int ret;

	if (!np || !dev)
		return -EINVAL;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	if (!of_device_is_available(target)) {
		of_node_put(target);
		return 0;
	}

	rmem = of_reserved_mem_lookup(target);
	of_node_put(target);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0) {
		rd->dev = dev;
		rd->rmem = rmem;

		mutex_lock(&of_rmem_assigned_device_mutex);
		list_add(&rd->list, &of_rmem_assigned_device_list);
		mutex_unlock(&of_rmem_assigned_device_mutex);

		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
	} else {
		kfree(rd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);

/**
 * of_reserved_mem_device_init_by_name() - assign named reserved memory region
 *					   to given device
 * @dev: pointer to the device to configure
 * @np: pointer to the device node with 'memory-region' property
 * @name: name of the selected memory region
 *
 * Returns: 0 on success or a negative error-code on failure.
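 *
 * A minimal usage sketch; the region name "foo" and the surrounding error
 * handling are purely illustrative, not taken from this file:
 *
 *	err = of_reserved_mem_device_init_by_name(dev, dev->of_node, "foo");
 *	if (err)
 *		dev_warn(dev, "no dedicated reserved memory region\n");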
 */
int of_reserved_mem_device_init_by_name(struct device *dev,
					struct device_node *np,
					const char *name)
{
	int idx = of_property_match_string(np, "memory-region-names", name);

	return of_reserved_mem_device_init_by_idx(dev, np, idx);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);

/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev:	Pointer to the device to deconfigure
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct rmem_assigned_device *rd, *tmp;
	LIST_HEAD(release_list);

	mutex_lock(&of_rmem_assigned_device_mutex);
	list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
		if (rd->dev == dev)
			list_move_tail(&rd->list, &release_list);
	}
	mutex_unlock(&of_rmem_assigned_device_mutex);

	list_for_each_entry_safe(rd, tmp, &release_list, list) {
		if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
			rd->rmem->ops->device_release(rd->rmem, dev);

		kfree(rd);
	}
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);

/**
 * of_reserved_mem_lookup() - acquire reserved_mem from a device node
 * @np:		node pointer of the desired reserved-memory region
 *
 * This function allows drivers to acquire a reference to the reserved_mem
 * struct based on a device node handle.
 *
 * Returns a reserved_mem reference, or NULL on error.
 */
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
	const char *name;
	int i;

	if (!np->full_name)
		return NULL;

	name = kbasename(np->full_name);
	for (i = 0; i < reserved_mem_count; i++)
		if (!strcmp(reserved_mem[i].name, name))
			return &reserved_mem[i];

	return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);