Lines Matching +full:rpm-msg-ram

1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
44 * two regions are cached and non-cached memory respectively. Each region
48 * Items in the non-cached region are allocated from the start of the partition
50 * is hence the region between the cached and non-cached offsets. The header of
59 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
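For orientation, the comment fragments above describe the following partition layout; the diagram below is an editor's sketch, not part of the matched source:

/*
 *  part->virt_base  ->  +---------------------------+
 *                       |  smem_partition_header    |
 *                       +---------------------------+
 *                       | uncached hdr 0 | data 0   |  uncached items grow
 *                       | uncached hdr 1 | data 1   |  from the start
 *                       +---------------------------+  <- offset_free_uncached
 *                       |        free space         |
 *                       +---------------------------+  <- offset_free_cached
 *                       | data 1 | cached hdr 1     |  cached items grow from
 *                       | data 0 | cached hdr 0     |  the end; the header
 *  end of partition ->  +---------------------------+  follows the data
 */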
92 * struct smem_proc_comm - proc_comm communication struct (legacy)
104 * struct smem_global_entry - entry to reference smem items on the heap
120 * struct smem_header - header found in beginning of primary smem region
140 * struct smem_ptable_entry - one entry in the @smem_ptable list
160 * struct smem_ptable - partition table for the private partitions
178 * struct smem_partition_header - header of the partitions
200 * struct smem_partition - describes smem partition
216 * struct smem_private_entry - header of each item in the private partition
235 * struct smem_info - smem region info located after the table of contents
253 * struct smem_region - representation of a chunk of memory used for smem
265 * struct qcom_smem - device data for the smem device
296 return p + le32_to_cpu(phdr->offset_free_uncached); in phdr_to_last_uncached_entry()
306 return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline); in phdr_to_first_cached_entry()
314 return p + le32_to_cpu(phdr->offset_free_cached); in phdr_to_last_cached_entry()
330 return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) + in uncached_entry_next()
331 le32_to_cpu(e->size); in uncached_entry_next()
339 return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline); in cached_entry_next()
346 return p + sizeof(*e) + le16_to_cpu(e->padding_hdr); in uncached_entry_to_item()
353 return p - le32_to_cpu(e->size); in cached_entry_to_item()
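The helpers above are plain pointer arithmetic over those two lists. A minimal sketch of walking the uncached list with them, mirroring the loop in qcom_smem_get_private() further down (phdr_to_first_uncached_entry() is a companion helper not among the matched lines):

	struct smem_private_entry *e, *end;

	e   = phdr_to_first_uncached_entry(phdr);  /* first header, right after phdr */
	end = phdr_to_last_uncached_entry(phdr);   /* phdr + offset_free_uncached    */

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			break;                     /* partition is corrupted */
		/* the item payload lives at uncached_entry_to_item(e) */
		e = uncached_entry_next(e);        /* skip hdr + padding_hdr + size */
	}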
363 * qcom_smem_is_available() - Check if SMEM is available
384 phdr = (struct smem_partition_header __force *)part->virt_base; in qcom_smem_alloc_private()
385 p_end = (void *)phdr + part->size; in qcom_smem_alloc_private()
392 return -EINVAL; in qcom_smem_alloc_private()
395 if (hdr->canary != SMEM_PRIVATE_CANARY) in qcom_smem_alloc_private()
397 if (le16_to_cpu(hdr->item) == item) in qcom_smem_alloc_private()
398 return -EEXIST; in qcom_smem_alloc_private()
404 return -EINVAL; in qcom_smem_alloc_private()
409 dev_err(smem->dev, "Out of memory\n"); in qcom_smem_alloc_private()
410 return -ENOSPC; in qcom_smem_alloc_private()
413 hdr->canary = SMEM_PRIVATE_CANARY; in qcom_smem_alloc_private()
414 hdr->item = cpu_to_le16(item); in qcom_smem_alloc_private()
415 hdr->size = cpu_to_le32(ALIGN(size, 8)); in qcom_smem_alloc_private()
416 hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size); in qcom_smem_alloc_private()
417 hdr->padding_hdr = 0; in qcom_smem_alloc_private()
425 le32_add_cpu(&phdr->offset_free_uncached, alloc_size); in qcom_smem_alloc_private()
429 dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n", in qcom_smem_alloc_private()
430 le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1)); in qcom_smem_alloc_private()
432 return -EINVAL; in qcom_smem_alloc_private()
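A worked example of the bookkeeping above, for a hypothetical 10-byte request:

/*
 *   hdr->size         = ALIGN(10, 8)  -> 16   (data rounded up to 8 bytes)
 *   hdr->padding_data = 16 - 10       -> 6    (pad at the end of the data)
 *   hdr->padding_hdr  = 0
 *   offset_free_uncached then advances by sizeof(*hdr) + 16, which is the
 *   le32_add_cpu() at line 425 above.
 */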
442 header = smem->regions[0].virt_base; in qcom_smem_alloc_global()
443 entry = &header->toc[item]; in qcom_smem_alloc_global()
444 if (entry->allocated) in qcom_smem_alloc_global()
445 return -EEXIST; in qcom_smem_alloc_global()
448 if (WARN_ON(size > le32_to_cpu(header->available))) in qcom_smem_alloc_global()
449 return -ENOMEM; in qcom_smem_alloc_global()
451 entry->offset = header->free_offset; in qcom_smem_alloc_global()
452 entry->size = cpu_to_le32(size); in qcom_smem_alloc_global()
460 entry->allocated = cpu_to_le32(1); in qcom_smem_alloc_global()
462 le32_add_cpu(&header->free_offset, size); in qcom_smem_alloc_global()
463 le32_add_cpu(&header->available, -size); in qcom_smem_alloc_global()
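Between storing the entry's offset/size and flagging it allocated, the full source places a write barrier (not among the matched lines) so that remote readers, which do not take the spinlock, never observe a half-initialized entry. The intended ordering, sketched:

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);
	wmb();		/* publish offset/size before marking the item allocated */
	entry->allocated = cpu_to_le32(1);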
469 * qcom_smem_alloc() - allocate space for a smem item
470 * @host: remote processor id, or -1
484 return -EPROBE_DEFER; in qcom_smem_alloc()
487 dev_err(__smem->dev, in qcom_smem_alloc()
489 return -EINVAL; in qcom_smem_alloc()
492 if (WARN_ON(item >= __smem->item_count)) in qcom_smem_alloc()
493 return -EINVAL; in qcom_smem_alloc()
495 ret = hwspin_lock_timeout_irqsave(__smem->hwlock, in qcom_smem_alloc()
501 if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) { in qcom_smem_alloc()
502 part = &__smem->partitions[host]; in qcom_smem_alloc()
504 } else if (__smem->global_partition.virt_base) { in qcom_smem_alloc()
505 part = &__smem->global_partition; in qcom_smem_alloc()
511 hwspin_unlock_irqrestore(__smem->hwlock, &flags); in qcom_smem_alloc()
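For reference, a hedged caller-side sketch of the exported API (SMEM_MY_ITEM is a hypothetical item number; the prototypes live in include/linux/soc/qcom/smem.h):

	#include <linux/soc/qcom/smem.h>

	#define SMEM_MY_ITEM	499	/* hypothetical item number */

	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_MY_ITEM, sizeof(u32));
	if (ret && ret != -EEXIST)
		return ret;	/* may be -EPROBE_DEFER before smem probes */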
529 header = smem->regions[0].virt_base; in qcom_smem_get_global()
530 entry = &header->toc[item]; in qcom_smem_get_global()
531 if (!entry->allocated) in qcom_smem_get_global()
532 return ERR_PTR(-ENXIO); in qcom_smem_get_global()
534 aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK; in qcom_smem_get_global()
536 for (i = 0; i < smem->num_regions; i++) { in qcom_smem_get_global()
537 region = &smem->regions[i]; in qcom_smem_get_global()
539 if ((u32)region->aux_base == aux_base || !aux_base) { in qcom_smem_get_global()
540 e_size = le32_to_cpu(entry->size); in qcom_smem_get_global()
541 entry_offset = le32_to_cpu(entry->offset); in qcom_smem_get_global()
543 if (WARN_ON(e_size + entry_offset > region->size)) in qcom_smem_get_global()
544 return ERR_PTR(-EINVAL); in qcom_smem_get_global()
549 return region->virt_base + entry_offset; in qcom_smem_get_global()
553 return ERR_PTR(-ENOENT); in qcom_smem_get_global()
567 phdr = (struct smem_partition_header __force *)part->virt_base; in qcom_smem_get_private()
568 p_end = (void *)phdr + part->size; in qcom_smem_get_private()
574 if (e->canary != SMEM_PRIVATE_CANARY) in qcom_smem_get_private()
577 if (le16_to_cpu(e->item) == item) { in qcom_smem_get_private()
579 e_size = le32_to_cpu(e->size); in qcom_smem_get_private()
580 padding_data = le16_to_cpu(e->padding_data); in qcom_smem_get_private()
582 if (WARN_ON(e_size > part->size || padding_data > e_size)) in qcom_smem_get_private()
583 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
585 *size = e_size - padding_data; in qcom_smem_get_private()
590 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
599 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
603 e = phdr_to_first_cached_entry(phdr, part->cacheline); in qcom_smem_get_private()
607 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
610 if (e->canary != SMEM_PRIVATE_CANARY) in qcom_smem_get_private()
613 if (le16_to_cpu(e->item) == item) { in qcom_smem_get_private()
615 e_size = le32_to_cpu(e->size); in qcom_smem_get_private()
616 padding_data = le16_to_cpu(e->padding_data); in qcom_smem_get_private()
618 if (WARN_ON(e_size > part->size || padding_data > e_size)) in qcom_smem_get_private()
619 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
621 *size = e_size - padding_data; in qcom_smem_get_private()
626 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
631 e = cached_entry_next(e, part->cacheline); in qcom_smem_get_private()
635 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
637 return ERR_PTR(-ENOENT); in qcom_smem_get_private()
640 dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n", in qcom_smem_get_private()
641 le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1)); in qcom_smem_get_private()
643 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
647 * qcom_smem_get() - resolve ptr and size of a smem item in qcom_smem_get()
648 * @host: the remote processor, or -1
660 void *ptr = ERR_PTR(-EPROBE_DEFER); in qcom_smem_get()
665 if (WARN_ON(item >= __smem->item_count)) in qcom_smem_get()
666 return ERR_PTR(-EINVAL); in qcom_smem_get()
668 ret = hwspin_lock_timeout_irqsave(__smem->hwlock, in qcom_smem_get()
674 if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) { in qcom_smem_get()
675 part = &__smem->partitions[host]; in qcom_smem_get()
677 } else if (__smem->global_partition.virt_base) { in qcom_smem_get()
678 part = &__smem->global_partition; in qcom_smem_get()
684 hwspin_unlock_irqrestore(__smem->hwlock, &flags); in qcom_smem_get()
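The matching read side, again as a hedged sketch; qcom_smem_get() returns an ERR_PTR on failure and writes the item size through its size argument:

	size_t size;
	u32 *val;

	val = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_MY_ITEM, &size);
	if (IS_ERR(val))
		return PTR_ERR(val);	/* e.g. -ENOENT or -EPROBE_DEFER */
	if (size != sizeof(*val))
		return -EINVAL;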
692 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
693 * @host: the remote processor identifying a partition, or -1
706 return -EPROBE_DEFER; in qcom_smem_get_free_space()
708 if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) { in qcom_smem_get_free_space()
709 part = &__smem->partitions[host]; in qcom_smem_get_free_space()
710 phdr = part->virt_base; in qcom_smem_get_free_space()
711 ret = le32_to_cpu(phdr->offset_free_cached) - in qcom_smem_get_free_space()
712 le32_to_cpu(phdr->offset_free_uncached); in qcom_smem_get_free_space()
714 if (ret > part->size) in qcom_smem_get_free_space()
715 return -EINVAL; in qcom_smem_get_free_space()
716 } else if (__smem->global_partition.virt_base) { in qcom_smem_get_free_space()
717 part = &__smem->global_partition; in qcom_smem_get_free_space()
718 phdr = part->virt_base; in qcom_smem_get_free_space()
719 ret = le32_to_cpu(phdr->offset_free_cached) - in qcom_smem_get_free_space()
720 le32_to_cpu(phdr->offset_free_uncached); in qcom_smem_get_free_space()
722 if (ret > part->size) in qcom_smem_get_free_space()
723 return -EINVAL; in qcom_smem_get_free_space()
725 header = __smem->regions[0].virt_base; in qcom_smem_get_free_space()
726 ret = le32_to_cpu(header->available); in qcom_smem_get_free_space()
728 if (ret > __smem->regions[0].size) in qcom_smem_get_free_space()
729 return -EINVAL; in qcom_smem_get_free_space()
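Caller-side sketch: the return value is a byte count or a negative errno (struct my_msg is hypothetical):

	int avail = qcom_smem_get_free_space(QCOM_SMEM_HOST_ANY);

	if (avail < 0)
		return avail;
	if ((size_t)avail < sizeof(struct my_msg))	/* my_msg is hypothetical */
		return -ENOSPC;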
742 * qcom_smem_virt_to_phys() - return the physical address associated with an smem item pointer in qcom_smem_virt_to_phys()
756 part = &__smem->partitions[i]; in qcom_smem_virt_to_phys()
758 if (addr_in_range(part->virt_base, part->size, p)) { in qcom_smem_virt_to_phys()
759 offset = p - part->virt_base; in qcom_smem_virt_to_phys()
761 return (phys_addr_t)part->phys_base + offset; in qcom_smem_virt_to_phys()
765 part = &__smem->global_partition; in qcom_smem_virt_to_phys()
767 if (addr_in_range(part->virt_base, part->size, p)) { in qcom_smem_virt_to_phys()
768 offset = p - part->virt_base; in qcom_smem_virt_to_phys()
770 return (phys_addr_t)part->phys_base + offset; in qcom_smem_virt_to_phys()
773 for (i = 0; i < __smem->num_regions; i++) { in qcom_smem_virt_to_phys()
774 area = &__smem->regions[i]; in qcom_smem_virt_to_phys()
776 if (addr_in_range(area->virt_base, area->size, p)) { in qcom_smem_virt_to_phys()
777 offset = p - area->virt_base; in qcom_smem_virt_to_phys()
779 return (phys_addr_t)area->aux_base + offset; in qcom_smem_virt_to_phys()
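Usage sketch: p is a pointer previously returned by qcom_smem_get(); the function falls through to return 0 when the pointer is not inside any known smem region:

	phys_addr_t phys = qcom_smem_virt_to_phys(p);

	if (!phys)
		return -EINVAL;	/* p is not backed by smem */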
788 * qcom_smem_get_soc_id() - return the SoC ID
803 *id = __le32_to_cpu(info->id); in qcom_smem_get_soc_id()
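Caller-side sketch of the SoC-ID helper:

	u32 soc_id;
	int ret = qcom_smem_get_soc_id(&soc_id);

	if (ret)
		return ret;
	dev_info(dev, "SoC ID %u\n", soc_id);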
814 header = smem->regions[0].virt_base; in qcom_smem_get_sbl_version()
815 versions = header->version; in qcom_smem_get_sbl_version()
825 ptable = smem->ptable; in qcom_smem_get_ptable()
826 if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic))) in qcom_smem_get_ptable()
827 return ERR_PTR(-ENOENT); in qcom_smem_get_ptable()
829 version = le32_to_cpu(ptable->version); in qcom_smem_get_ptable()
831 dev_err(smem->dev, in qcom_smem_get_ptable()
833 return ERR_PTR(-EINVAL); in qcom_smem_get_ptable()
847 info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)]; in qcom_smem_get_item_count()
848 if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic))) in qcom_smem_get_item_count()
851 return le16_to_cpu(info->num_items); in qcom_smem_get_item_count()
867 phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset); in qcom_smem_partition_header()
868 header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size)); in qcom_smem_partition_header()
873 if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) { in qcom_smem_partition_header()
874 dev_err(smem->dev, "bad partition magic %4ph\n", header->magic); in qcom_smem_partition_header()
878 if (host0 != le16_to_cpu(header->host0)) { in qcom_smem_partition_header()
879 dev_err(smem->dev, "bad host0 (%hu != %hu)\n", in qcom_smem_partition_header()
880 host0, le16_to_cpu(header->host0)); in qcom_smem_partition_header()
883 if (host1 != le16_to_cpu(header->host1)) { in qcom_smem_partition_header()
884 dev_err(smem->dev, "bad host1 (%hu != %hu)\n", in qcom_smem_partition_header()
885 host1, le16_to_cpu(header->host1)); in qcom_smem_partition_header()
889 size = le32_to_cpu(header->size); in qcom_smem_partition_header()
890 if (size != le32_to_cpu(entry->size)) { in qcom_smem_partition_header()
891 dev_err(smem->dev, "bad partition size (%u != %u)\n", in qcom_smem_partition_header()
892 size, le32_to_cpu(entry->size)); in qcom_smem_partition_header()
896 if (le32_to_cpu(header->offset_free_uncached) > size) { in qcom_smem_partition_header()
897 dev_err(smem->dev, "bad partition free uncached (%u > %u)\n", in qcom_smem_partition_header()
898 le32_to_cpu(header->offset_free_uncached), size); in qcom_smem_partition_header()
913 if (smem->global_partition.virt_base) { in qcom_smem_set_global_partition()
914 dev_err(smem->dev, "Already found the global partition\n"); in qcom_smem_set_global_partition()
915 return -EINVAL; in qcom_smem_set_global_partition()
922 for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) { in qcom_smem_set_global_partition()
923 entry = &ptable->entry[i]; in qcom_smem_set_global_partition()
924 if (!le32_to_cpu(entry->offset)) in qcom_smem_set_global_partition()
926 if (!le32_to_cpu(entry->size)) in qcom_smem_set_global_partition()
929 if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST) in qcom_smem_set_global_partition()
932 if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) { in qcom_smem_set_global_partition()
939 dev_err(smem->dev, "Missing entry for global partition\n"); in qcom_smem_set_global_partition()
940 return -EINVAL; in qcom_smem_set_global_partition()
946 return -EINVAL; in qcom_smem_set_global_partition()
948 smem->global_partition.virt_base = (void __iomem *)header; in qcom_smem_set_global_partition()
949 smem->global_partition.phys_base = smem->regions[0].aux_base + in qcom_smem_set_global_partition()
950 le32_to_cpu(entry->offset); in qcom_smem_set_global_partition()
951 smem->global_partition.size = le32_to_cpu(entry->size); in qcom_smem_set_global_partition()
952 smem->global_partition.cacheline = le32_to_cpu(entry->cacheline); in qcom_smem_set_global_partition()
971 for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) { in qcom_smem_enumerate_partitions()
972 entry = &ptable->entry[i]; in qcom_smem_enumerate_partitions()
973 if (!le32_to_cpu(entry->offset)) in qcom_smem_enumerate_partitions()
975 if (!le32_to_cpu(entry->size)) in qcom_smem_enumerate_partitions()
978 host0 = le16_to_cpu(entry->host0); in qcom_smem_enumerate_partitions()
979 host1 = le16_to_cpu(entry->host1); in qcom_smem_enumerate_partitions()
988 dev_err(smem->dev, "bad host %u\n", remote_host); in qcom_smem_enumerate_partitions()
989 return -EINVAL; in qcom_smem_enumerate_partitions()
992 if (smem->partitions[remote_host].virt_base) { in qcom_smem_enumerate_partitions()
993 dev_err(smem->dev, "duplicate host %u\n", remote_host); in qcom_smem_enumerate_partitions()
994 return -EINVAL; in qcom_smem_enumerate_partitions()
999 return -EINVAL; in qcom_smem_enumerate_partitions()
1001 smem->partitions[remote_host].virt_base = (void __iomem *)header; in qcom_smem_enumerate_partitions()
1002 smem->partitions[remote_host].phys_base = smem->regions[0].aux_base + in qcom_smem_enumerate_partitions()
1003 le32_to_cpu(entry->offset); in qcom_smem_enumerate_partitions()
1004 smem->partitions[remote_host].size = le32_to_cpu(entry->size); in qcom_smem_enumerate_partitions()
1005 smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline); in qcom_smem_enumerate_partitions()
1016 region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K); in qcom_smem_map_toc()
1017 ptable_start = region->aux_base + region->size - SZ_4K; in qcom_smem_map_toc()
1019 smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K); in qcom_smem_map_toc()
1021 if (!region->virt_base || !smem->ptable) in qcom_smem_map_toc()
1022 return -ENOMEM; in qcom_smem_map_toc()
1031 phys_addr = smem->regions[0].aux_base; in qcom_smem_map_global()
1033 smem->regions[0].size = size; in qcom_smem_map_global()
1034 smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size); in qcom_smem_map_global()
1036 if (!smem->regions[0].virt_base) in qcom_smem_map_global()
1037 return -ENOMEM; in qcom_smem_map_global()
1045 struct device *dev = smem->dev; in qcom_smem_resolve_mem()
1050 np = of_parse_phandle(dev->of_node, name, 0); in qcom_smem_resolve_mem()
1053 return -EINVAL; in qcom_smem_resolve_mem()
1061 region->aux_base = r.start; in qcom_smem_resolve_mem()
1062 region->size = resource_size(&r); in qcom_smem_resolve_mem()
1081 if (of_property_present(pdev->dev.of_node, "qcom,rpm-msg-ram")) in qcom_smem_probe()
1084 smem = devm_kzalloc(&pdev->dev, struct_size(smem, regions, num_regions), in qcom_smem_probe()
1087 return -ENOMEM; in qcom_smem_probe()
1089 smem->dev = &pdev->dev; in qcom_smem_probe()
1090 smem->num_regions = num_regions; in qcom_smem_probe()
1092 rmem = of_reserved_mem_lookup(pdev->dev.of_node); in qcom_smem_probe()
1094 smem->regions[0].aux_base = rmem->base; in qcom_smem_probe()
1095 smem->regions[0].size = rmem->size; in qcom_smem_probe()
1098 * Fall back to the memory-region reference, if we're not a in qcom_smem_probe()
1099 * reserved-memory node. in qcom_smem_probe()
1101 ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]); in qcom_smem_probe()
1107 ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]); in qcom_smem_probe()
1113 ret = qcom_smem_map_toc(smem, &smem->regions[0]); in qcom_smem_probe()
1118 smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev, in qcom_smem_probe()
1119 smem->regions[i].aux_base, in qcom_smem_probe()
1120 smem->regions[i].size); in qcom_smem_probe()
1121 if (!smem->regions[i].virt_base) { in qcom_smem_probe()
1122 dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base); in qcom_smem_probe()
1123 return -ENOMEM; in qcom_smem_probe()
1127 header = smem->regions[0].virt_base; in qcom_smem_probe()
1128 if (le32_to_cpu(header->initialized) != 1 || in qcom_smem_probe()
1129 le32_to_cpu(header->reserved)) { in qcom_smem_probe()
1130 dev_err(&pdev->dev, "SMEM is not initialized by SBL\n"); in qcom_smem_probe()
1131 return -EINVAL; in qcom_smem_probe()
1134 hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0); in qcom_smem_probe()
1136 if (hwlock_id != -EPROBE_DEFER) in qcom_smem_probe()
1137 dev_err(&pdev->dev, "failed to retrieve hwlock\n"); in qcom_smem_probe()
1141 smem->hwlock = hwspin_lock_request_specific(hwlock_id); in qcom_smem_probe()
1142 if (!smem->hwlock) in qcom_smem_probe()
1143 return -ENXIO; in qcom_smem_probe()
1145 ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags); in qcom_smem_probe()
1148 size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset); in qcom_smem_probe()
1149 hwspin_unlock_irqrestore(smem->hwlock, &flags); in qcom_smem_probe()
1157 devm_iounmap(smem->dev, smem->regions[0].virt_base); in qcom_smem_probe()
1163 smem->item_count = qcom_smem_get_item_count(smem); in qcom_smem_probe()
1167 smem->item_count = SMEM_ITEM_COUNT; in qcom_smem_probe()
1170 dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version); in qcom_smem_probe()
1171 return -EINVAL; in qcom_smem_probe()
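The item_count and dev_err lines above come from a switch over the upper half of the SBL version word; a reconstructed sketch of its shape, assuming the upstream constant names SMEM_GLOBAL_PART_VERSION and SMEM_GLOBAL_HEAP_VERSION:

	version = qcom_smem_get_sbl_version(smem);
	switch (version >> 16) {
	case SMEM_GLOBAL_PART_VERSION:	/* global partition; item count from smem_info */
		ret = qcom_smem_set_global_partition(smem);
		if (ret < 0)
			return ret;
		smem->item_count = qcom_smem_get_item_count(smem);
		break;
	case SMEM_GLOBAL_HEAP_VERSION:	/* classic global heap */
		smem->item_count = SMEM_ITEM_COUNT;
		break;
	default:
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}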
1176 if (ret < 0 && ret != -ENOENT) in qcom_smem_probe()
1181 smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo", in qcom_smem_probe()
1184 if (IS_ERR(smem->socinfo)) in qcom_smem_probe()
1185 dev_dbg(&pdev->dev, "failed to register socinfo device\n"); in qcom_smem_probe()
1192 platform_device_unregister(__smem->socinfo); in qcom_smem_remove()
1194 hwspin_lock_free(__smem->hwlock); in qcom_smem_remove()
1208 .name = "qcom-smem",