Lines Matching +full:add +full:- +full:pmem

1 // SPDX-License-Identifier: GPL-2.0
7 #include <linux/memory-tiers.h>
54 * Node 0 & 1 are CPU + DRAM nodes, node 2 & 3 are PMEM nodes.
63 * memory_tiers0 = 0-1
64 * memory_tiers1 = 2-3
73 * Node 0 & 1 are CPU + DRAM nodes, node 2 is a memory-only DRAM node.
81 * memory_tiers0 = 0-2
89 * Node 0 is a CPU + DRAM node, node 1 is an HBM node, node 2 is a PMEM node.
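
The tier groupings shown in these examples end up as devices on the memory_tier_subsys bus (registered further down, see find_create_memory_tier()). A hedged userspace sketch that lists each tier's nodes, assuming the usual /sys/devices/virtual/memory_tiering/memory_tier<N>/nodelist layout; verify the path on the running kernel:

/*
 * Hedged sketch (not part of the file above): print each memory tier's
 * node list via sysfs.  The path is an assumption; check it before use.
 */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *base = "/sys/devices/virtual/memory_tiering";
	char path[256], buf[128];
	struct dirent *de;
	DIR *dir = opendir(base);

	if (!dir)
		return 1;
	while ((de = readdir(dir))) {
		FILE *f;

		if (strncmp(de->d_name, "memory_tier", strlen("memory_tier")))
			continue;
		snprintf(path, sizeof(path), "%s/%s/nodelist", base, de->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", de->d_name, buf);
		fclose(f);
	}
	closedir(dir);
	return 0;
}
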
126 list_for_each_entry(memtype, &memtier->memory_types, tier_sibling) in get_memtier_nodemask()
127 nodes_or(nodes, nodes, memtype->nodes); in get_memtier_nodemask()
175 int adistance = memtype->adistance; in find_create_memory_tier()
185 if (!list_empty(&memtype->tier_sibling)) { in find_create_memory_tier()
187 if (adistance == memtier->adistance_start) in find_create_memory_tier()
191 return ERR_PTR(-EINVAL); in find_create_memory_tier()
195 if (adistance == memtier->adistance_start) { in find_create_memory_tier()
197 } else if (adistance < memtier->adistance_start) { in find_create_memory_tier()
205 return ERR_PTR(-ENOMEM); in find_create_memory_tier()
207 new_memtier->adistance_start = adistance; in find_create_memory_tier()
208 INIT_LIST_HEAD(&new_memtier->list); in find_create_memory_tier()
209 INIT_LIST_HEAD(&new_memtier->memory_types); in find_create_memory_tier()
211 list_add_tail(&new_memtier->list, &memtier->list); in find_create_memory_tier()
213 list_add_tail(&new_memtier->list, &memory_tiers); in find_create_memory_tier()
215 new_memtier->dev.id = adistance >> MEMTIER_CHUNK_BITS; in find_create_memory_tier()
216 new_memtier->dev.bus = &memory_tier_subsys; in find_create_memory_tier()
217 new_memtier->dev.release = memory_tier_device_release; in find_create_memory_tier()
218 new_memtier->dev.groups = memtier_dev_groups; in find_create_memory_tier()
220 ret = device_register(&new_memtier->dev); in find_create_memory_tier()
222 list_del(&new_memtier->list); in find_create_memory_tier()
223 put_device(&new_memtier->dev); in find_create_memory_tier()
229 list_add(&memtype->tier_sibling, &memtier->memory_types); in find_create_memory_tier()
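
find_create_memory_tier() keeps the tier list sorted by adistance_start and reuses an existing tier when a new type's abstract distance falls into the same chunk. A minimal sketch of that mapping, using the same MEMTIER_CHUNK_BITS shift as the dev.id assignment above (example_tier_id() and example_same_tier() are hypothetical helpers, not part of this file):

/*
 * Hedged sketch: each tier covers one MEMTIER_CHUNK_SIZE-wide range of
 * abstract distance, so the chunk index doubles as the tier device id.
 */
#include <linux/memory-tiers.h>

static inline int example_tier_id(int adistance)
{
	/* Same shift used for new_memtier->dev.id above. */
	return adistance >> MEMTIER_CHUNK_BITS;
}

static inline bool example_same_tier(int adist_a, int adist_b)
{
	/* Two memory types in the same chunk share a memory tier. */
	return example_tier_id(adist_a) == example_tier_id(adist_b);
}
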
245 return rcu_dereference_check(pgdat->memtier, in __node_get_memory_tier()
261 memtier = rcu_dereference(pgdat->memtier); in node_is_toptier()
266 if (memtier->adistance_start <= top_tier_adistance) in node_is_toptier()
285 memtier = rcu_dereference(pgdat->memtier); in node_get_allowed_targets()
287 *targets = memtier->lower_tier_mask; in node_get_allowed_targets()
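
node_is_toptier() and node_get_allowed_targets() are the read-side queries the rest of mm uses under RCU. A hedged sketch of a caller that asks whether a node has any lower tier to demote to (example_node_can_demote() is a hypothetical helper):

/*
 * Hedged sketch: fetch the lower-tier mask for @node's pgdat and report
 * whether demotion from it is possible at all.
 */
#include <linux/memory-tiers.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>

static bool example_node_can_demote(int node)
{
	nodemask_t targets = NODE_MASK_NONE;

	node_get_allowed_targets(NODE_DATA(node), &targets);
	return !nodes_empty(targets);
}
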
294 * next_demotion_node() - Get the next node in the demotion path
324 * In addition, we can also use round-robin to select in next_demotion_node()
327 * that may cause cache ping-pong due to the changing of in next_demotion_node()
328 * last target node. Or introducing per-cpu data to avoid in next_demotion_node()
332 target = node_random(&nd->preferred); in next_demotion_node()
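
next_demotion_node() is the entry point reclaim uses to pick a demotion target; the node_random() call above spreads demotions across equally preferred targets. A minimal hedged sketch of a caller (example_pick_demotion_target() is hypothetical):

/*
 * Hedged sketch: ask where pages on @nid should be demoted to.
 * NUMA_NO_NODE means @nid sits in the lowest tier and has no target.
 */
#include <linux/memory-tiers.h>
#include <linux/numa.h>
#include <linux/printk.h>

static int example_pick_demotion_target(int nid)
{
	int target = next_demotion_node(nid);

	if (target == NUMA_NO_NODE)
		pr_debug("node %d has no demotion target\n", nid);
	return target;
}
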
347 * to access pgdat->memtier. in disable_all_demotion_targets()
351 memtier->lower_tier_mask = NODE_MASK_NONE; in disable_all_demotion_targets()
383 best_distance = -1; in establish_demotion_targets()
387 if (!memtier || list_is_last(&memtier->list, &memory_tiers)) in establish_demotion_targets()
396 * Add all memory nodes except the selected memory tier in establish_demotion_targets()
404 * add them to the preferred mask. We randomly select between nodes in establish_demotion_targets()
413 if (distance == best_distance || best_distance == -1) { in establish_demotion_targets()
415 node_set(target, nd->preferred); in establish_demotion_targets()
437 top_tier_adistance = memtier->adistance_start + in establish_demotion_targets()
438 MEMTIER_CHUNK_SIZE - 1; in establish_demotion_targets()
457 memtier->lower_tier_mask = lower_tier; in establish_demotion_targets()
479 kref_get(&memtype->kref); in __init_node_memory_type()
493 return ERR_PTR(-EINVAL); in set_node_memory_tier()
498 node_set(node, memtype->nodes); in set_node_memory_tier()
501 rcu_assign_pointer(pgdat->memtier, memtier); in set_node_memory_tier()
507 list_del(&memtier->list); in destroy_memory_tier()
508 device_unregister(&memtier->dev); in destroy_memory_tier()
533 rcu_assign_pointer(pgdat->memtier, NULL); in clear_node_memory_tier()
536 node_clear(node, memtype->nodes); in clear_node_memory_tier()
537 if (nodes_empty(memtype->nodes)) { in clear_node_memory_tier()
538 list_del_init(&memtype->tier_sibling); in clear_node_memory_tier()
539 if (list_empty(&memtier->memory_types)) in clear_node_memory_tier()
561 return ERR_PTR(-ENOMEM); in alloc_memory_type()
563 memtype->adistance = adistance; in alloc_memory_type()
564 INIT_LIST_HEAD(&memtype->tier_sibling); in alloc_memory_type()
565 memtype->nodes = NODE_MASK_NONE; in alloc_memory_type()
566 kref_init(&memtype->kref); in alloc_memory_type()
573 kref_put(&memtype->kref, release_memtype); in put_memory_type()
590 node_memory_types[node].map_count--; in clear_node_memory_type()
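
alloc_memory_type(), init_node_memory_type(), clear_node_memory_type() and put_memory_type() are the driver-facing half of this file (dax/kmem is the in-tree user). A hedged sketch of that lifecycle; the adistance value is invented ("one tier below DRAM") and example_register_node()/example_unregister_node() are hypothetical:

/*
 * Hedged sketch of the driver-side lifecycle around the helpers above.
 * The abstract distance is illustrative only.
 */
#include <linux/err.h>
#include <linux/memory-tiers.h>

static struct memory_dev_type *example_type;

static int example_register_node(int node)
{
	example_type = alloc_memory_type(MEMTIER_ADISTANCE_DRAM + MEMTIER_CHUNK_SIZE);
	if (IS_ERR(example_type))
		return PTR_ERR(example_type);

	/* Associate @node with the type; the tier itself is set up when
	 * the node's memory is onlined. */
	init_node_memory_type(node, example_type);
	return 0;
}

static void example_unregister_node(int node)
{
	clear_node_memory_type(node, example_type);
	put_memory_type(example_type);	/* drop the alloc reference */
}
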
608 prefix, coord->read_latency, coord->write_latency, in dump_hmem_attrs()
609 coord->read_bandwidth, coord->write_bandwidth); in dump_hmem_attrs()
619 rc = -EIO; in mt_set_default_dram_perf()
623 if (perf->read_latency + perf->write_latency == 0 || in mt_set_default_dram_perf()
624 perf->read_bandwidth + perf->write_bandwidth == 0) { in mt_set_default_dram_perf()
625 rc = -EINVAL; in mt_set_default_dram_perf()
642 if (abs(perf->read_latency - default_dram_perf.read_latency) * 10 > in mt_set_default_dram_perf()
644 abs(perf->write_latency - default_dram_perf.write_latency) * 10 > in mt_set_default_dram_perf()
646 abs(perf->read_bandwidth - default_dram_perf.read_bandwidth) * 10 > in mt_set_default_dram_perf()
648 abs(perf->write_bandwidth - default_dram_perf.write_bandwidth) * 10 > in mt_set_default_dram_perf()
651 "memory-tiers: the performance of DRAM node %d mismatches that of the reference\n" in mt_set_default_dram_perf()
661 rc = -EINVAL; in mt_set_default_dram_perf()
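
The comparisons above implement a roughly 10% tolerance against the recorded default DRAM performance: abs(new - reference) * 10 > reference holds exactly when the new value deviates by more than one tenth of the reference. A hedged sketch with invented numbers (example_mismatch() is not part of this file):

/* Hedged illustration of the ~10% mismatch test used above. */
#include <linux/kernel.h>

static bool example_mismatch(s64 val, s64 ref)
{
	/* example_mismatch(115, 100): 150 > 100 -> true  (warn);
	 * example_mismatch(109, 100):  90 > 100 -> false (accept). */
	return abs(val - ref) * 10 > ref;
}
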
672 return -EIO; in mt_perf_to_adistance()
675 return -ENOENT; in mt_perf_to_adistance()
677 if (perf->read_latency + perf->write_latency == 0 || in mt_perf_to_adistance()
678 perf->read_bandwidth + perf->write_bandwidth == 0) in mt_perf_to_adistance()
679 return -EINVAL; in mt_perf_to_adistance()
690 (perf->read_latency + perf->write_latency) / in mt_perf_to_adistance()
693 (perf->read_bandwidth + perf->write_bandwidth); in mt_perf_to_adistance()
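
The resulting abstract distance is MEMTIER_ADISTANCE_DRAM scaled by the node's combined latency relative to default DRAM and by default DRAM's combined bandwidth relative to the node's. A hedged worked example with invented ratios (example_adistance() is hypothetical):

/*
 * Hedged worked example: performance expressed as a percentage of default
 * DRAM's.  200% latency and 50% bandwidth give
 * MEMTIER_ADISTANCE_DRAM * 200 / 100 * 100 / 50 = 4 * MEMTIER_ADISTANCE_DRAM,
 * i.e. the node lands several chunks below the DRAM tier.
 */
#include <linux/memory-tiers.h>

static int example_adistance(int lat_pct, int bw_pct)
{
	return MEMTIER_ADISTANCE_DRAM * lat_pct / 100 * 100 / bw_pct;
}
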
701 * register_mt_adistance_algorithm() - Register memory tiering abstract distance algorithm
729 * unregister_mt_adistance_algorithm() - Unregister memory tiering abstract distance algorithm
741 * mt_calc_adistance() - Calculate abstract distance with registered algorithms
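
register_mt_adistance_algorithm() and mt_calc_adistance() are built around a notifier chain: the node id is passed as the notifier action and a pointer to the abstract distance as the data argument. A hedged sketch of a provider; the callback conventions used here (int *adist payload, NOTIFY_OK return) are assumptions to be checked against in-tree users such as the ACPI HMAT code:

/*
 * Hedged sketch of an abstract-distance provider; example_calc_adist()
 * and its return convention are assumptions, not part of this file.
 */
#include <linux/memory-tiers.h>
#include <linux/notifier.h>

static int example_calc_adist(struct notifier_block *nb,
			      unsigned long nid, void *data)
{
	int *adist = data;

	/* Pretend every node this provider knows is one tier below DRAM. */
	*adist = MEMTIER_ADISTANCE_DRAM + MEMTIER_CHUNK_SIZE;
	return NOTIFY_OK;
}

static struct notifier_block example_adist_nb = {
	.notifier_call = example_calc_adist,
};

static int example_register_algorithm(void)
{
	return register_mt_adistance_algorithm(&example_adist_nb);
}

static void example_unregister_algorithm(void)
{
	unregister_mt_adistance_algorithm(&example_adist_nb);
}
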
764 * changing status, like online->offline. in memtier_hotplug_callback()
766 if (arg->status_change_nid < 0) in memtier_hotplug_callback()
772 if (clear_node_memory_tier(arg->status_change_nid)) in memtier_hotplug_callback()
778 memtier = set_node_memory_tier(arg->status_change_nid); in memtier_hotplug_callback()
812 * Look at all the existing N_MEMORY nodes and add them to in memory_tier_init()
876 return -ENOMEM; in numa_init_sysfs()