Lines matching "locality-specific" in drivers/acpi/numa/hmat.c

1 // SPDX-License-Identifier: GPL-2.0
27 #include <linux/memory-tiers.h>
96 if (initiator->processor_pxm == cpu_pxm) in find_mem_initiator()
106 if (target->memory_pxm == mem_pxm) in find_mem_target()
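The two hits at 96 and 106 are the bodies of the PXM-keyed lookup helpers. A minimal sketch of the pattern, assuming the usual layout of this file (module-global initiators/targets lists):

        static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
        {
                struct memory_initiator *initiator;

                /* Linear walk; the list holds one entry per proximity domain. */
                list_for_each_entry(initiator, &initiators, node)
                        if (initiator->processor_pxm == cpu_pxm)
                                return initiator;
                return NULL;
        }

        static struct memory_target *find_mem_target(unsigned int mem_pxm)
        {
                struct memory_target *target;

                list_for_each_entry(target, &targets, node)
                        if (target->memory_pxm == mem_pxm)
                                return target;
                return NULL;
        }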
112 * hmat_get_extended_linear_cache_size - Retrieve the extended linear cache size
130 return -ENOENT; in hmat_get_extended_linear_cache_size()
132 list_for_each_entry(tcache, &target->caches, node) { in hmat_get_extended_linear_cache_size()
133 if (tcache->cache_attrs.address_mode != in hmat_get_extended_linear_cache_size()
137 res = &target->memregions; in hmat_get_extended_linear_cache_size()
141 *cache_size = tcache->cache_attrs.size; in hmat_get_extended_linear_cache_size()
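Lines 112-141 walk the target's cache list looking for a cache in extended-linear address mode whose backing window contains the queried range. A sketch of the loop body, assuming resource_contains() performs the containment test and NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR is the mode being matched (neither is visible in the hits):

        list_for_each_entry(tcache, &target->caches, node) {
                if (tcache->cache_attrs.address_mode !=
                    NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR)
                        continue;

                res = &target->memregions;
                if (!resource_contains(res, backing_res))
                        continue;

                *cache_size = tcache->cache_attrs.size;
                return 0;
        }

        /* No extended-linear cache fronts this range. */
        *cache_size = 0;
        return 0;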
157 uid_ptr = target->gen_port_device_handle + 8; in acpi_find_genport_target()
167 * acpi_get_genport_coordinates - Retrieve the access coordinates for a generic port
184 return -ENOENT; in acpi_get_genport_coordinates()
187 target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL]; in acpi_get_genport_coordinates()
189 target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_CPU]; in acpi_get_genport_coordinates()
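Line 157 skips the first 8 bytes of the 16-byte SRAT generic port device handle: for an ACPI handle, bytes 0-7 carry the _HID and the _UID starts at byte 8, so the lookup compares the caller's uid against the stored one. A plausible reconstruction of the helper:

        static struct memory_target *acpi_find_genport_target(u32 uid)
        {
                struct memory_target *target;
                u32 target_uid;
                u8 *uid_ptr;

                list_for_each_entry(target, &targets, node) {
                        /* ACPI device handle: bytes 0-7 _HID, byte 8 on _UID */
                        uid_ptr = target->gen_port_device_handle + 8;
                        target_uid = *(u32 *)uid_ptr;
                        if (uid == target_uid)
                                return target;
                }
                return NULL;
        }

Lines 184-189 then copy the two generic-port access classes (local sink and nearest-CPU sink) out of target->coord[] into the caller's coordinates.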
210 initiator->processor_pxm = cpu_pxm; in alloc_memory_initiator()
211 initiator->has_cpu = node_state(pxm_to_node(cpu_pxm), N_CPU); in alloc_memory_initiator()
212 list_add_tail(&initiator->node, &initiators); in alloc_memory_initiator()
224 target->memory_pxm = mem_pxm; in alloc_target()
225 target->processor_pxm = PXM_INVAL; in alloc_target()
226 target->memregions = (struct resource) { in alloc_target()
229 .end = -1, in alloc_target()
232 list_add_tail(&target->node, &targets); in alloc_target()
233 INIT_LIST_HEAD(&target->caches); in alloc_target()
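Lines 224-233 set up a new target. Note the memregions root resource spanning 0..-1, i.e. the whole physical address space: later __request_region() calls carve per-SRAT-range children out of it. A sketch of the allocation path, assuming kzalloc() and a find-before-alloc idiom (the resource name string is a guess):

        static __init struct memory_target *alloc_target(unsigned int mem_pxm)
        {
                struct memory_target *target;

                target = find_mem_target(mem_pxm);
                if (target)
                        return target;

                target = kzalloc(sizeof(*target), GFP_KERNEL);
                if (!target)
                        return NULL;

                target->memory_pxm = mem_pxm;
                target->processor_pxm = PXM_INVAL;
                /* Root spans everything; children are added per SRAT entry. */
                target->memregions = (struct resource) {
                        .name   = "ACPI mem",
                        .start  = 0,
                        .end    = -1,
                        .flags  = IORESOURCE_MEM,
                };
                list_add_tail(&target->node, &targets);
                INIT_LIST_HEAD(&target->caches);

                return target;
        }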
251 * in the per-target memregions resource tree. in alloc_memory_target()
253 if (!__request_region(&target->memregions, start, len, "memory target", in alloc_memory_target()
255 pr_warn("failed to reserve %#llx - %#llx in pxm: %d\n", in alloc_memory_target()
267 memcpy(target->gen_port_device_handle, handle, in alloc_genport_target()
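Line 253 records each SRAT memory-affinity range as a child of that root resource, and line 255 only warns on a failed reservation rather than aborting the parse. Line 267 stashes the raw 16-byte device handle for the later _UID lookup; the copy-length macro here is assumed:

        if (!__request_region(&target->memregions, start, len,
                              "memory target", IORESOURCE_MEM))
                pr_warn("failed to reserve %#llx - %#llx in pxm: %d\n",
                        start, start + len, mem_pxm);

        /* ...and in alloc_genport_target(): */
        memcpy(target->gen_port_device_handle, handle,
               ACPI_SRAT_DEVICE_HANDLE_SIZE);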
347 target->coord[access].read_latency = value; in hmat_update_target_access()
348 target->coord[access].write_latency = value; in hmat_update_target_access()
351 target->coord[access].read_latency = value; in hmat_update_target_access()
354 target->coord[access].write_latency = value; in hmat_update_target_access()
357 target->coord[access].read_bandwidth = value; in hmat_update_target_access()
358 target->coord[access].write_bandwidth = value; in hmat_update_target_access()
361 target->coord[access].read_bandwidth = value; in hmat_update_target_access()
364 target->coord[access].write_bandwidth = value; in hmat_update_target_access()
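The hits at 347-364 are the arms of a switch on the HMAT data type: the combined ACCESS_* types set both directions, while the READ_*/WRITE_* types set one each. A reconstruction that matches the visible assignments:

        static void hmat_update_target_access(struct memory_target *target,
                                              u8 type, u32 value, int access)
        {
                switch (type) {
                case ACPI_HMAT_ACCESS_LATENCY:
                        target->coord[access].read_latency = value;
                        target->coord[access].write_latency = value;
                        break;
                case ACPI_HMAT_READ_LATENCY:
                        target->coord[access].read_latency = value;
                        break;
                case ACPI_HMAT_WRITE_LATENCY:
                        target->coord[access].write_latency = value;
                        break;
                case ACPI_HMAT_ACCESS_BANDWIDTH:
                        target->coord[access].read_bandwidth = value;
                        target->coord[access].write_bandwidth = value;
                        break;
                case ACPI_HMAT_READ_BANDWIDTH:
                        target->coord[access].read_bandwidth = value;
                        break;
                case ACPI_HMAT_WRITE_BANDWIDTH:
                        target->coord[access].write_bandwidth = value;
                        break;
                default:
                        break;
                }
        }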
378 return -EINVAL; in hmat_update_target_coordinates()
384 return -ENODEV; in hmat_update_target_coordinates()
387 coord->read_latency, access); in hmat_update_target_coordinates()
389 coord->write_latency, access); in hmat_update_target_coordinates()
391 coord->read_bandwidth, access); in hmat_update_target_coordinates()
393 coord->write_bandwidth, access); in hmat_update_target_coordinates()
394 target->ext_updated = true; in hmat_update_target_coordinates()
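Lines 378-394 let an external provider overwrite the four HMAT-derived values for a node; setting ext_updated at 394 is what line 777 checks to skip recomputation. A hedged usage sketch, with the signature inferred from the error paths and field accesses in the hits:

        struct access_coordinate coord = {
                .read_latency    = 100,        /* ns */
                .write_latency   = 100,
                .read_bandwidth  = 20000,      /* MB/s */
                .write_bandwidth = 20000,
        };
        int rc;

        rc = hmat_update_target_coordinates(nid, &coord,
                                            ACCESS_COORDINATE_CPU);
        if (rc)
                return rc;      /* -EINVAL bad node, -ENODEV no target */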
406 pr_notice_once("Failed to allocate HMAT locality\n"); in hmat_add_locality()
410 loc->hmat_loc = hmat_loc; in hmat_add_locality()
411 list_add_tail(&loc->node, &localities); in hmat_add_locality()
413 switch (hmat_loc->data_type) { in hmat_add_locality()
447 if (target && target->processor_pxm == init_pxm) { in hmat_update_target()
466 if (hmat_loc->header.length < sizeof(*hmat_loc)) { in hmat_parse_locality()
467 pr_notice("Unexpected locality header length: %u\n", in hmat_parse_locality()
468 hmat_loc->header.length); in hmat_parse_locality()
469 return -EINVAL; in hmat_parse_locality()
472 type = hmat_loc->data_type; in hmat_parse_locality()
473 mem_hier = hmat_loc->flags & ACPI_HMAT_MEMORY_HIERARCHY; in hmat_parse_locality()
474 ipds = hmat_loc->number_of_initiator_Pds; in hmat_parse_locality()
475 tpds = hmat_loc->number_of_target_Pds; in hmat_parse_locality()
478 if (hmat_loc->header.length < total_size) { in hmat_parse_locality()
479 pr_notice("Unexpected locality header length:%u, minimum required:%u\n", in hmat_parse_locality()
480 hmat_loc->header.length, total_size); in hmat_parse_locality()
481 return -EINVAL; in hmat_parse_locality()
484 pr_debug("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n", in hmat_parse_locality()
485 hmat_loc->flags, hmat_data_type(type), ipds, tpds, in hmat_parse_locality()
486 hmat_loc->entry_base_unit); in hmat_parse_locality()
495 hmat_loc->entry_base_unit, in hmat_parse_locality()
497 pr_debug(" Initiator-Target[%u-%u]:%u%s\n", in hmat_parse_locality()
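Lines 474-481 validate the subtable length before the variable-sized arrays are touched. After the fixed header come ipds u32 initiator PDs, tpds u32 target PDs, then a row-major ipds x tpds matrix of u16 entries, which gives the minimum-length formula and the pointer setup used by the dump loop at 495-497:

        u32 *inits, *targs;
        u16 *deltas;
        u32 total_size;

        total_size = sizeof(*hmat_loc) + sizeof(*deltas) * ipds * tpds +
                     sizeof(*inits) * ipds + sizeof(*targs) * tpds;

        inits  = (u32 *)(hmat_loc + 1);
        targs  = inits + ipds;
        deltas = (u16 *)(targs + tpds);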
520 if (cache->header.length < sizeof(*cache)) { in hmat_parse_cache()
522 cache->header.length); in hmat_parse_cache()
523 return -EINVAL; in hmat_parse_cache()
526 attrs = cache->cache_attributes; in hmat_parse_cache()
528 cache->memory_PD, cache->cache_size, attrs, in hmat_parse_cache()
529 cache->number_of_SMBIOShandles); in hmat_parse_cache()
531 target = find_mem_target(cache->memory_PD); in hmat_parse_cache()
541 tcache->cache_attrs.size = cache->cache_size; in hmat_parse_cache()
542 tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4; in hmat_parse_cache()
543 tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16; in hmat_parse_cache()
547 tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP; in hmat_parse_cache()
549 if (cache->address_mode == ACPI_HMAT_CACHE_MODE_EXTENDED_LINEAR) { in hmat_parse_cache()
550 tcache->cache_attrs.address_mode = in hmat_parse_cache()
555 tcache->cache_attrs.indexing = NODE_CACHE_INDEXED; in hmat_parse_cache()
559 tcache->cache_attrs.indexing = NODE_CACHE_OTHER; in hmat_parse_cache()
565 tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK; in hmat_parse_cache()
568 tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH; in hmat_parse_cache()
572 tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER; in hmat_parse_cache()
575 list_add_tail(&tcache->node, &target->caches); in hmat_parse_cache()
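Lines 542-543 unpack bitfields from the 32-bit cache_attributes word (cache level in bits 4-7, line size in bits 16-31), and the write-policy arms at 565-572 come from bits 12-15. A standalone decoder you can compile in user space; the mask values follow the ACPICA definitions and the shifts visible in the hits:

        #include <stdint.h>
        #include <stdio.h>

        #define ACPI_HMAT_TOTAL_CACHE_LEVEL     0x0000000F
        #define ACPI_HMAT_CACHE_LEVEL           0x000000F0
        #define ACPI_HMAT_CACHE_ASSOCIATIVITY   0x00000F00
        #define ACPI_HMAT_WRITE_POLICY          0x0000F000
        #define ACPI_HMAT_CACHE_LINE_SIZE       0xFFFF0000

        int main(void)
        {
                uint32_t attrs = 0x00401211;    /* made-up example value */

                printf("levels:%u level:%u assoc:%u wpolicy:%u line:%u\n",
                       (unsigned)(attrs & ACPI_HMAT_TOTAL_CACHE_LEVEL),
                       (unsigned)((attrs & ACPI_HMAT_CACHE_LEVEL) >> 4),
                       (unsigned)((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8),
                       (unsigned)((attrs & ACPI_HMAT_WRITE_POLICY) >> 12),
                       (unsigned)((attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16));
                return 0;
        }

For 0x00401211 this prints levels:1 level:1 assoc:2 wpolicy:1 line:64.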
586 if (p->header.length != sizeof(*p)) { in hmat_parse_proximity_domain()
588 p->header.length); in hmat_parse_proximity_domain()
589 return -EINVAL; in hmat_parse_proximity_domain()
594 p->reserved3, p->reserved4, p->flags, p->processor_PD, in hmat_parse_proximity_domain()
595 p->memory_PD); in hmat_parse_proximity_domain()
598 p->flags, p->processor_PD, p->memory_PD); in hmat_parse_proximity_domain()
600 if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) || in hmat_parse_proximity_domain()
602 target = find_mem_target(p->memory_PD); in hmat_parse_proximity_domain()
605 return -EINVAL; in hmat_parse_proximity_domain()
608 if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) { in hmat_parse_proximity_domain()
609 int p_node = pxm_to_node(p->processor_PD); in hmat_parse_proximity_domain()
613 return -EINVAL; in hmat_parse_proximity_domain()
615 target->processor_pxm = p->processor_PD; in hmat_parse_proximity_domain()
627 return -EINVAL; in hmat_parse_subtable()
629 switch (hdr->type) { in hmat_parse_subtable()
637 return -EINVAL; in hmat_parse_subtable()
647 return -EINVAL; in srat_parse_mem_affinity()
648 if (!(ma->flags & ACPI_SRAT_MEM_ENABLED)) in srat_parse_mem_affinity()
650 alloc_memory_target(ma->proximity_domain, ma->base_address, ma->length); in srat_parse_mem_affinity()
660 return -EINVAL; in srat_parse_genport_affinity()
662 if (!(ga->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED)) in srat_parse_genport_affinity()
666 if (ga->device_handle_type != 0) in srat_parse_genport_affinity()
669 alloc_genport_target(ga->proximity_domain, in srat_parse_genport_affinity()
670 (u8 *)ga->device_handle); in srat_parse_genport_affinity()
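Lines 660-670 filter SRAT Generic Affinity entries: reject a malformed entry, skip disabled ones, and skip non-ACPI device handles (type 0 is ACPI, type 1 is PCI) before allocating a generic-port target. A plausible reconstruction:

        static __init int srat_parse_genport_affinity(union acpi_subtable_headers *header,
                                                      const unsigned long end)
        {
                struct acpi_srat_generic_affinity *ga = (void *)header;

                if (!ga)
                        return -EINVAL;

                if (!(ga->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED))
                        return 0;

                /* Skip PCI device handles for now */
                if (ga->device_handle_type != 0)
                        return 0;

                alloc_genport_target(ga->proximity_domain,
                                     (u8 *)ga->device_handle);
                return 0;
        }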
683 ipds = hmat_loc->number_of_initiator_Pds; in hmat_initiator_perf()
684 tpds = hmat_loc->number_of_target_Pds; in hmat_initiator_perf()
690 if (inits[i] == initiator->processor_pxm) { in hmat_initiator_perf()
700 if (targs[i] == target->memory_pxm) { in hmat_initiator_perf()
709 hmat_loc->entry_base_unit, in hmat_initiator_perf()
710 hmat_loc->data_type); in hmat_initiator_perf()
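Lines 683-710 find the initiator's row (loop at 690) and the target's column (loop at 700) in the locality header's PD arrays, then pull the u16 entry out of the flattened matrix and scale it by entry_base_unit. The lookup, with idx/tdx as the matched row/column indices (names assumed):

        /* entries[] is the row-major ipds * tpds matrix after the PD lists */
        return hmat_normalize(entries[idx * tpds + tdx],
                              hmat_loc->entry_base_unit,
                              hmat_loc->data_type);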
751 return ia->processor_pxm - ib->processor_pxm; in initiator_cmp()
759 return -ENXIO; in initiators_to_nodemask()
762 set_bit(initiator->processor_pxm, p_nodes); in initiators_to_nodemask()
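Line 751 is the list_sort() comparator that orders initiators by proximity domain, and lines 759-762 fold the sorted list into a node bitmap so the best-initiator scan below can walk domains in ascending order. A sketch of the comparator:

        static int initiator_cmp(void *priv, const struct list_head *a,
                                 const struct list_head *b)
        {
                struct memory_initiator *ia, *ib;

                ia = list_entry(a, struct memory_initiator, node);
                ib = list_entry(b, struct memory_initiator, node);

                /* Ascending by proximity domain */
                return ia->processor_pxm - ib->processor_pxm;
        }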
777 if (target->ext_updated) in hmat_update_target_attrs()
783 !(*(u16 *)target->gen_port_device_handle)) in hmat_update_target_attrs()
792 if (target->processor_pxm != PXM_INVAL) { in hmat_update_target_attrs()
793 cpu_nid = pxm_to_node(target->processor_pxm); in hmat_update_target_attrs()
796 set_bit(target->processor_pxm, p_nodes); in hmat_update_target_attrs()
825 !initiator->has_cpu) { in hmat_update_target_attrs()
826 clear_bit(initiator->processor_pxm, p_nodes); in hmat_update_target_attrs()
829 if (!test_bit(initiator->processor_pxm, p_nodes)) in hmat_update_target_attrs()
832 value = hmat_initiator_perf(target, initiator, loc->hmat_loc); in hmat_update_target_attrs()
833 if (hmat_update_best(loc->hmat_loc->data_type, value, &best)) in hmat_update_target_attrs()
834 bitmap_clear(p_nodes, 0, initiator->processor_pxm); in hmat_update_target_attrs()
836 clear_bit(initiator->processor_pxm, p_nodes); in hmat_update_target_attrs()
839 hmat_update_target_access(target, loc->hmat_loc->data_type, best, access); in hmat_update_target_attrs()
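The loop at 825-836 performs the selection: CPU-only access classes drop initiators without CPUs (825-826), already-eliminated domains are skipped (829), a strictly better value clears every lower-numbered candidate (834), and a losing domain clears only itself (836). "Better" is type-dependent: lower wins for latencies, higher for bandwidths. A compilable user-space model of that comparison (my names, mirroring the hmat_update_best() call at 833):

        #include <stdbool.h>
        #include <stdint.h>

        enum perf_type { LATENCY, BANDWIDTH };

        /* Returns true when value displaces *best; value 0 means "no data". */
        static bool update_best(enum perf_type type, uint32_t value, uint32_t *best)
        {
                if (!value)
                        return false;
                if (*best == 0 ||
                    (type == LATENCY   && value < *best) ||
                    (type == BANDWIDTH && value > *best)) {
                        *best = value;
                        return true;
                }
                return false;
        }

        int main(void)
        {
                uint32_t best = 0;

                update_best(LATENCY, 300, &best);       /* first value wins */
                update_best(LATENCY, 150, &best);       /* lower latency wins */
                update_best(LATENCY, 200, &best);       /* worse, ignored */
                return best == 150 ? 0 : 1;
        }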
850 mem_nid = pxm_to_node(target->memory_pxm); in __hmat_register_target_initiators()
880 unsigned mem_nid = pxm_to_node(target->memory_pxm); in hmat_register_target_cache()
883 list_for_each_entry(tcache, &target->caches, node) in hmat_register_target_cache()
884 node_add_cache(mem_nid, &tcache->cache_attrs); in hmat_register_target_cache()
889 unsigned mem_nid = pxm_to_node(target->memory_pxm); in hmat_register_target_perf()
890 node_set_perf_attrs(mem_nid, &target->coord[access], access); in hmat_register_target_perf()
904 for (res = target->memregions.child; res; res = res->sibling) { in hmat_register_target_devices()
905 int target_nid = pxm_to_node(target->memory_pxm); in hmat_register_target_devices()
913 int nid = pxm_to_node(target->memory_pxm); in hmat_register_target()
926 if (*(u16 *)target->gen_port_device_handle) { in hmat_register_target()
928 target->registered = true; in hmat_register_target()
934 * marked EFI_MEMORY_SP, "specific purpose", is applied in hmat_register_target()
937 * memory-only "hotplug" node is offline. in hmat_register_target()
943 if (!target->registered) { in hmat_register_target()
948 target->registered = true; in hmat_register_target()
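Lines 913-948 register a target at most once: generic-port targets export their coordinates and bail early (926-928), while memory targets wait for their node to come online (the EFI_MEMORY_SP comment at 934-937 explains why a node can be absent at boot) and flip the registered flag under a lock so the hotplug callback at 966 can retry later. A sketch of the guard, assuming the file's target_lock mutex and helper names close to those in the hits:

        mutex_lock(&target_lock);
        if (!target->registered) {
                hmat_register_target_initiators(target);
                hmat_register_target_cache(target);
                hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
                hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
                target->registered = true;
        }
        mutex_unlock(&target_lock);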
966 int pxm, nid = mnb->status_change_nid; in hmat_callback()
992 attrs = &target->coord[ACCESS_COORDINATE_CPU]; in hmat_set_default_dram_perf()
1019 perf = &target->coord[ACCESS_COORDINATE_CPU]; in hmat_calculate_adistance()
1042 list_for_each_entry_safe(tcache, cnext, &target->caches, node) { in hmat_free_structures()
1043 list_del(&tcache->node); in hmat_free_structures()
1047 list_del(&target->node); in hmat_free_structures()
1048 res = target->memregions.child; in hmat_free_structures()
1050 res_next = res->sibling; in hmat_free_structures()
1051 __release_region(&target->memregions, res->start, in hmat_free_structures()
1059 list_del(&initiator->node); in hmat_free_structures()
1064 list_del(&loc->node); in hmat_free_structures()
1100 hmat_revision = tbl->revision; in hmat_init()
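Line 1100 caches the table revision, which later drives the revision-1 vs revision-2 flag handling at 600 and the normalization of matrix entries. A sketch of the surrounding init flow, assuming the conventional acpi_get_table() error handling:

        status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
        if (ACPI_FAILURE(status))
                goto out_put;

        hmat_revision = tbl->revision;
        switch (hmat_revision) {
        case 1:
        case 2:
                break;
        default:
                pr_notice("Ignoring: Unknown revision:%d\n", hmat_revision);
                goto out_put;
        }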