Lines Matching refs:cpu (cross-referencer hits for the identifier "cpu" in the Linux kernel's generic cache-topology code, drivers/base/cacheinfo.c). The leading number on each hit is its line number in that file; the trailing "in foo()", "argument", and "local" tags are the tool's context markers giving the enclosing function and how the identifier is used there.

26 #define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu)) argument
27 #define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves) argument
28 #define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list) argument
29 #define per_cpu_cacheinfo_idx(cpu, idx) \ argument
30 (per_cpu_cacheinfo(cpu) + (idx))
35 struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu) in get_cpu_cacheinfo() argument
37 return ci_cacheinfo(cpu); in get_cpu_cacheinfo()
58 bool last_level_cache_is_valid(unsigned int cpu) in last_level_cache_is_valid() argument
62 if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu)) in last_level_cache_is_valid()
65 llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1); in last_level_cache_is_valid()
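
Taken together, the macros and the two helpers above are plain array bookkeeping: a per-CPU struct holds a leaf count plus a heap-allocated array of leaves, and the last-level cache (LLC) is simply the final entry of that array. Below is a minimal userspace sketch of the pattern, with per_cpu() reduced to array indexing and the id/type validity test that last_level_cache_is_valid() also performs elided; last_level_cache() is a made-up name for the sketch.

    #include <stdio.h>

    #define NR_CPUS 64

    struct cacheinfo { unsigned int level; };

    struct cpu_cacheinfo {
        unsigned int num_leaves;
        struct cacheinfo *info_list;
    };

    /* stand-in for DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo) */
    static struct cpu_cacheinfo ci_cpu_cacheinfo[NR_CPUS];

    #define ci_cacheinfo(cpu)               (&ci_cpu_cacheinfo[cpu])
    #define cache_leaves(cpu)               (ci_cacheinfo(cpu)->num_leaves)
    #define per_cpu_cacheinfo(cpu)          (ci_cacheinfo(cpu)->info_list)
    #define per_cpu_cacheinfo_idx(cpu, idx) (per_cpu_cacheinfo(cpu) + (idx))

    /* the LLC is the last populated leaf, or NULL if none yet */
    static struct cacheinfo *last_level_cache(unsigned int cpu)
    {
        if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))
            return NULL;
        return per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
    }

    int main(void)
    {
        static struct cacheinfo leaves[3] = { {1}, {2}, {3} };

        cache_leaves(0) = 3;
        per_cpu_cacheinfo(0) = leaves;
        printf("cpu0 LLC level: %u\n", last_level_cache(0)->level); /* 3 */
        return 0;
    }
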
187 static bool match_cache_node(struct device_node *cpu, in match_cache_node() argument
190 struct device_node *prev, *cache = of_find_next_cache_node(cpu); in match_cache_node()
213 struct device_node *cpu; in cache_of_set_id() local
216 for_each_of_cpu_node(cpu) { in cache_of_set_id()
217 u64 id = of_get_cpu_hwid(cpu, 0); in cache_of_set_id()
221 of_node_put(cpu); in cache_of_set_id()
225 if (match_cache_node(cpu, cache_node)) in cache_of_set_id()
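
match_cache_node() follows the devicetree's next-level-cache chain outward from a CPU node, and cache_of_set_id() runs that match for every CPU node, with of_node_put() on the early exit keeping refcounts balanced. A hypothetical flattened model of the walk, with phandles reduced to plain pointers and refcounting dropped:

    #include <stdbool.h>
    #include <stdio.h>

    /* phandle chain reduced to a pointer: cpu -> L2 -> L3 -> NULL */
    struct device_node { struct device_node *next_cache; };

    static bool match_cache_node(const struct device_node *cpu,
                                 const struct device_node *cache_node)
    {
        const struct device_node *cache;

        for (cache = cpu->next_cache; cache; cache = cache->next_cache)
            if (cache == cache_node)
                return true;
        return false;
    }

    int main(void)
    {
        struct device_node l3  = { NULL };
        struct device_node l2  = { &l3 };
        struct device_node cpu = { &l2 };

        printf("%d %d\n", match_cache_node(&cpu, &l3),   /* 1 */
               match_cache_node(&cpu, &cpu));            /* 0 */
        return 0;
    }
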
253 static int cache_setup_of_node(unsigned int cpu) in cache_setup_of_node() argument
258 struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu); in cache_setup_of_node()
260 pr_err("Failed to find cpu%d device node\n", cpu); in cache_setup_of_node()
268 while (index < cache_leaves(cpu)) { in cache_setup_of_node()
269 this_leaf = per_cpu_cacheinfo_idx(cpu, index); in cache_setup_of_node()
281 if (index != cache_leaves(cpu)) /* not all OF nodes populated */ in cache_setup_of_node()
327 int init_of_cache_level(unsigned int cpu) in init_of_cache_level() argument
329 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); in init_of_cache_level()
330 struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu); in init_of_cache_level()
365 static inline int cache_setup_of_node(unsigned int cpu) { return 0; } in cache_setup_of_node() argument
366 int init_of_cache_level(unsigned int cpu) { return 0; } in init_of_cache_level() argument
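
init_of_cache_level() sizes the hierarchy from the devicetree by walking that same next-level-cache chain and reading each node's cache-level property, while cache_setup_of_node() attaches an OF node to every leaf (returning an error if not all leaves find one, per line 281 above). A simplified sketch of the counting, assuming a split L1 I/D and unified higher levels; count_of_cache() and struct cache_node are made up for the sketch:

    #include <stdio.h>

    /* chain model: each node carries its cache-level and its successor */
    struct cache_node { unsigned int level; struct cache_node *next; };

    static void count_of_cache(const struct cache_node *first,
                               unsigned int *levels, unsigned int *leaves)
    {
        const struct cache_node *n;

        *levels = 1;        /* L1, described in the CPU node itself */
        *leaves = 2;        /* assume split L1 I/D for this sketch */
        for (n = first; n; n = n->next) {
            *levels = n->level;     /* "cache-level" property */
            *leaves += 1;           /* higher levels modeled as unified */
        }
    }

    int main(void)
    {
        struct cache_node l3 = { 3, NULL };
        struct cache_node l2 = { 2, &l3 };
        unsigned int levels, leaves;

        count_of_cache(&l2, &levels, &leaves);
        printf("levels=%u leaves=%u\n", levels, leaves); /* 3, 4 */
        return 0;
    }
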
369 int __weak cache_setup_acpi(unsigned int cpu) in cache_setup_acpi() argument
376 static int cache_setup_properties(unsigned int cpu) in cache_setup_properties() argument
381 ret = cache_setup_of_node(cpu); in cache_setup_properties()
383 ret = cache_setup_acpi(cpu); in cache_setup_properties()
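
cache_setup_properties() picks one firmware source rather than merging both: devicetree when one is populated, otherwise ACPI, with the __weak cache_setup_acpi() stub covering architectures without PPTT support. A minimal model of that dispatch; the flag variables and probe stubs below stand in for of_have_populated_dt(), !acpi_disabled, and the real setup functions:

    #include <stdbool.h>
    #include <stdio.h>

    static bool have_populated_dt = false;  /* of_have_populated_dt() stand-in */
    static bool acpi_enabled = true;        /* !acpi_disabled stand-in */

    static int setup_of_node(unsigned int cpu) { (void)cpu; return 0; }
    static int setup_acpi(unsigned int cpu)    { (void)cpu; return 0; }

    static int cache_setup_properties(unsigned int cpu)
    {
        int ret = 0;

        if (have_populated_dt)      /* prefer devicetree when present */
            ret = setup_of_node(cpu);
        else if (acpi_enabled)      /* otherwise fall back to ACPI */
            ret = setup_acpi(cpu);
        return ret;
    }

    int main(void)
    {
        printf("cpu0: %d\n", cache_setup_properties(0));
        return 0;
    }
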
392 static int cache_shared_cpu_map_setup(unsigned int cpu) in cache_shared_cpu_map_setup() argument
394 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); in cache_shared_cpu_map_setup()
407 if (!last_level_cache_is_valid(cpu) && !use_arch_info) { in cache_shared_cpu_map_setup()
408 ret = cache_setup_properties(cpu); in cache_shared_cpu_map_setup()
413 for (index = 0; index < cache_leaves(cpu); index++) { in cache_shared_cpu_map_setup()
416 this_leaf = per_cpu_cacheinfo_idx(cpu, index); in cache_shared_cpu_map_setup()
418 cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); in cache_shared_cpu_map_setup()
420 if (i == cpu || !per_cpu_cacheinfo(i)) in cache_shared_cpu_map_setup()
435 cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); in cache_shared_cpu_map_setup()
451 static void cache_shared_cpu_map_remove(unsigned int cpu) in cache_shared_cpu_map_remove() argument
453 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); in cache_shared_cpu_map_remove()
457 for (index = 0; index < cache_leaves(cpu); index++) { in cache_shared_cpu_map_remove()
458 this_leaf = per_cpu_cacheinfo_idx(cpu, index); in cache_shared_cpu_map_remove()
460 if (sibling == cpu || !per_cpu_cacheinfo(sibling)) in cache_shared_cpu_map_remove()
476 cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); in cache_shared_cpu_map_remove()
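
cache_shared_cpu_map_setup() and cache_shared_cpu_map_remove() are mirror images: when a CPU comes up it is added to the shared_cpu_map of every sibling leaf that shares hardware with it, and the siblings are added to its own map; when it goes down its bit is cleared everywhere. A userspace model with the leaves collapsed to one mask per CPU; shares_cache() is a toy cluster rule, and the populated[] flag models the kernel's "skip CPUs whose cacheinfo is not yet allocated" check (lines 420 and 460):

    #include <stdio.h>

    #define NR_CPUS 8

    static unsigned long shared_map[NR_CPUS];   /* one LLC "leaf" per CPU */
    static int populated[NR_CPUS];              /* per_cpu_cacheinfo(i) != NULL */

    /* toy sharing rule: CPUs in the same 4-wide cluster share their LLC */
    static int shares_cache(unsigned int a, unsigned int b)
    {
        return a / 4 == b / 4;
    }

    static void map_setup(unsigned int cpu)
    {
        unsigned int i;

        populated[cpu] = 1;
        shared_map[cpu] |= 1UL << cpu;      /* always shares with itself */
        for (i = 0; i < NR_CPUS; i++) {
            if (i == cpu || !populated[i] || !shares_cache(cpu, i))
                continue;
            shared_map[cpu] |= 1UL << i;    /* sibling into our map ... */
            shared_map[i]   |= 1UL << cpu;  /* ... and us into the sibling's */
        }
    }

    static void map_remove(unsigned int cpu)
    {
        unsigned int i;

        for (i = 0; i < NR_CPUS; i++)
            shared_map[i] &= ~(1UL << cpu); /* drop our bit everywhere */
        shared_map[cpu] = 0;
        populated[cpu] = 0;
    }

    int main(void)
    {
        map_setup(0);
        map_setup(1);
        printf("cpu0: %#lx\n", shared_map[0]);  /* 0x3: shared with cpu1 */
        map_remove(1);
        printf("cpu0: %#lx\n", shared_map[0]);  /* 0x1: cpu1 gone */
        return 0;
    }
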
488 static void free_cache_attributes(unsigned int cpu) in free_cache_attributes() argument
490 if (!per_cpu_cacheinfo(cpu)) in free_cache_attributes()
493 cache_shared_cpu_map_remove(cpu); in free_cache_attributes()
496 int __weak early_cache_level(unsigned int cpu) in early_cache_level() argument
501 int __weak init_cache_level(unsigned int cpu) in init_cache_level() argument
506 int __weak populate_cache_leaves(unsigned int cpu) in populate_cache_leaves() argument
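
early_cache_level(), init_cache_level(), and populate_cache_leaves() are __weak generic stubs: an architecture that can enumerate its own caches (x86 CPUID, arm64 CLIDR, and so on) supplies strong definitions that replace them at link time. The mechanism itself, demonstrated standalone with the GCC/Clang weak attribute; the function name mirrors the listing, but the file split described in the comment is hypothetical:

    /* weak default, used only if nothing overrides it */
    #include <errno.h>
    #include <stdio.h>

    int __attribute__((weak)) init_cache_level(unsigned int cpu)
    {
        (void)cpu;
        return -ENOENT;     /* "the arch gave us nothing" */
    }

    /* a second translation unit with a strong definition like
     *
     *     int init_cache_level(unsigned int cpu) { return 0; }
     *
     * would silently replace the weak stub above at link time */

    int main(void)
    {
        printf("init_cache_level(0) = %d\n", init_cache_level(0));
        return 0;
    }
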
511 static inline int allocate_cache_info(int cpu) in allocate_cache_info() argument
513 per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), sizeof(struct cacheinfo), GFP_ATOMIC); in allocate_cache_info()
514 if (!per_cpu_cacheinfo(cpu)) { in allocate_cache_info()
515 cache_leaves(cpu) = 0; in allocate_cache_info()
522 int fetch_cache_info(unsigned int cpu) in fetch_cache_info() argument
524 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); in fetch_cache_info()
529 ret = init_of_cache_level(cpu); in fetch_cache_info()
531 ret = acpi_get_cache_info(cpu, &levels, &split_levels); in fetch_cache_info()
544 if (ret || !cache_leaves(cpu)) { in fetch_cache_info()
545 ret = early_cache_level(cpu); in fetch_cache_info()
549 if (!cache_leaves(cpu)) in fetch_cache_info()
555 return allocate_cache_info(cpu); in fetch_cache_info()
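
fetch_cache_info() sizes the hierarchy early, before the arch hooks run: the devicetree path fills the counts via init_of_cache_level(), the ACPI path gets total levels plus the number of levels split into separate I/D caches (each split level contributes two leaves), and early_cache_level() is the fallback estimate (lines 544-545). allocate_cache_info() then kcalloc()s the leaf array, zeroing cache_leaves() again on failure so the count never outlives its buffer. A sketch of the arithmetic and the failure path, with GFP_ATOMIC reduced to plain calloc() and the function signatures simplified:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cacheinfo { unsigned int level; };

    struct cpu_cacheinfo {
        unsigned int num_levels;
        unsigned int num_leaves;
        struct cacheinfo *info_list;
    };

    static int allocate_cache_info(struct cpu_cacheinfo *ci)
    {
        ci->info_list = calloc(ci->num_leaves, sizeof(*ci->info_list));
        if (!ci->info_list) {
            ci->num_leaves = 0; /* keep count and buffer consistent */
            return -ENOMEM;
        }
        return 0;
    }

    static int fetch_cache_info(struct cpu_cacheinfo *ci,
                                unsigned int levels, unsigned int split_levels)
    {
        ci->num_levels = levels;
        /* a split level has an I-cache and a D-cache: one extra leaf */
        ci->num_leaves = levels + split_levels;
        if (!ci->num_leaves)
            return -ENOENT;
        return allocate_cache_info(ci);
    }

    int main(void)
    {
        struct cpu_cacheinfo ci = { 0 };

        /* e.g. split L1 I/D + unified L2 + unified L3: 3 levels, 1 split */
        fetch_cache_info(&ci, 3, 1);
        printf("levels=%u leaves=%u\n", ci.num_levels, ci.num_leaves); /* 3 4 */
        free(ci.info_list);
        return 0;
    }
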
558 static inline int init_level_allocate_ci(unsigned int cpu) in init_level_allocate_ci() argument
560 unsigned int early_leaves = cache_leaves(cpu); in init_level_allocate_ci()
572 if (per_cpu_cacheinfo(cpu) && !ci_cacheinfo(cpu)->early_ci_levels) in init_level_allocate_ci()
575 if (init_cache_level(cpu) || !cache_leaves(cpu)) in init_level_allocate_ci()
583 ci_cacheinfo(cpu)->early_ci_levels = false; in init_level_allocate_ci()
589 if (cache_leaves(cpu) <= early_leaves && per_cpu_cacheinfo(cpu)) in init_level_allocate_ci()
592 kfree(per_cpu_cacheinfo(cpu)); in init_level_allocate_ci()
593 return allocate_cache_info(cpu); in init_level_allocate_ci()
596 int detect_cache_attributes(unsigned int cpu) in detect_cache_attributes() argument
600 ret = init_level_allocate_ci(cpu); in detect_cache_attributes()
608 if (!last_level_cache_is_valid(cpu)) { in detect_cache_attributes()
613 ret = populate_cache_leaves(cpu); in detect_cache_attributes()
623 ret = cache_shared_cpu_map_setup(cpu); in detect_cache_attributes()
625 pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu); in detect_cache_attributes()
632 free_cache_attributes(cpu); in detect_cache_attributes()
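
init_level_allocate_ci() reconciles the early estimate with the architecture's final answer: if the buffer is already final, or the early one is at least as large as what init_cache_level() now reports, it is reused (lines 570-589); otherwise it is freed and reallocated. detect_cache_attributes() then fills the leaves, asking the arch via populate_cache_leaves() only when firmware did not already yield a valid LLC, wires up the shared maps, and unwinds everything on failure. The control flow, condensed with one-line stand-ins for the helpers:

    #include <stdbool.h>
    #include <stdio.h>

    static int  init_level_allocate_ci(unsigned int cpu)     { (void)cpu; return 0; }
    static bool last_level_cache_is_valid(unsigned int cpu)  { (void)cpu; return false; }
    static int  populate_cache_leaves(unsigned int cpu)      { (void)cpu; return 0; }
    static int  cache_shared_cpu_map_setup(unsigned int cpu) { (void)cpu; return 0; }
    static void free_cache_attributes(unsigned int cpu)      { (void)cpu; }

    static int detect_cache_attributes(unsigned int cpu)
    {
        int ret;

        ret = init_level_allocate_ci(cpu);      /* size + (re)allocate leaves */
        if (ret)
            return ret;

        if (!last_level_cache_is_valid(cpu)) {  /* firmware info missing/partial */
            ret = populate_cache_leaves(cpu);   /* ask the architecture */
            if (ret)
                goto free_ci;
        }

        ret = cache_shared_cpu_map_setup(cpu);  /* build sibling masks */
        if (ret)
            goto free_ci;
        return 0;

    free_ci:
        free_cache_attributes(cpu);             /* unwind on any failure */
        return ret;
    }

    int main(void)
    {
        printf("cpu0: %d\n", detect_cache_attributes(0));
        return 0;
    }
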
638 #define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu)) argument
644 #define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu)) argument
645 #define per_cache_index_dev(cpu, idx) ((per_cpu_index_dev(cpu))[idx]) argument
854 static void cpu_cache_sysfs_exit(unsigned int cpu) in cpu_cache_sysfs_exit() argument
859 if (per_cpu_index_dev(cpu)) { in cpu_cache_sysfs_exit()
860 for (i = 0; i < cache_leaves(cpu); i++) { in cpu_cache_sysfs_exit()
861 ci_dev = per_cache_index_dev(cpu, i); in cpu_cache_sysfs_exit()
866 kfree(per_cpu_index_dev(cpu)); in cpu_cache_sysfs_exit()
867 per_cpu_index_dev(cpu) = NULL; in cpu_cache_sysfs_exit()
869 device_unregister(per_cpu_cache_dev(cpu)); in cpu_cache_sysfs_exit()
870 per_cpu_cache_dev(cpu) = NULL; in cpu_cache_sysfs_exit()
873 static int cpu_cache_sysfs_init(unsigned int cpu) in cpu_cache_sysfs_init() argument
875 struct device *dev = get_cpu_device(cpu); in cpu_cache_sysfs_init()
877 if (per_cpu_cacheinfo(cpu) == NULL) in cpu_cache_sysfs_init()
880 per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache"); in cpu_cache_sysfs_init()
881 if (IS_ERR(per_cpu_cache_dev(cpu))) in cpu_cache_sysfs_init()
882 return PTR_ERR(per_cpu_cache_dev(cpu)); in cpu_cache_sysfs_init()
885 per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu), in cpu_cache_sysfs_init()
887 if (unlikely(per_cpu_index_dev(cpu) == NULL)) in cpu_cache_sysfs_init()
893 cpu_cache_sysfs_exit(cpu); in cpu_cache_sysfs_init()
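
cpu_cache_sysfs_init() creates a per-CPU "cache" parent device plus a kcalloc()'d array of per-leaf index devices; cpu_cache_sysfs_exit() tears the leaves down first, then the parent, NULLing both pointers, and doubles as the error-unwind for init (line 893 above). The ownership pattern modeled in plain C for a single CPU, with malloc/free standing in for device creation/unregistration and the per-leaf device creation elided:

    #include <stdio.h>
    #include <stdlib.h>

    #define NR_LEAVES 4

    struct device { int dummy; };

    static struct device *cache_dev;    /* per_cpu_cache_dev() stand-in */
    static struct device **index_devs;  /* per_cpu_index_dev() stand-in */

    static void cpu_cache_sysfs_exit(void)
    {
        unsigned int i;

        if (index_devs) {
            for (i = 0; i < NR_LEAVES; i++)
                free(index_devs[i]);    /* device_unregister() stand-in */
            free(index_devs);
            index_devs = NULL;          /* safe to run again */
        }
        free(cache_dev);
        cache_dev = NULL;
    }

    static int cpu_cache_sysfs_init(void)
    {
        cache_dev = malloc(sizeof(*cache_dev));     /* "cache" parent */
        if (!cache_dev)
            return -1;
        index_devs = calloc(NR_LEAVES, sizeof(*index_devs));
        if (!index_devs) {
            cpu_cache_sysfs_exit();     /* exit doubles as error unwind */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        if (!cpu_cache_sysfs_init())
            puts("sysfs model up");
        cpu_cache_sysfs_exit();
        return 0;
    }
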
897 static int cache_add_dev(unsigned int cpu) in cache_add_dev() argument
905 rc = cpu_cache_sysfs_init(cpu); in cache_add_dev()
909 parent = per_cpu_cache_dev(cpu); in cache_add_dev()
910 for (i = 0; i < cache_leaves(cpu); i++) { in cache_add_dev()
911 this_leaf = per_cpu_cacheinfo_idx(cpu, i); in cache_add_dev()
923 per_cache_index_dev(cpu, i) = ci_dev; in cache_add_dev()
925 cpumask_set_cpu(cpu, &cache_dev_map); in cache_add_dev()
929 cpu_cache_sysfs_exit(cpu); in cache_add_dev()
933 static unsigned int cpu_map_shared_cache(bool online, unsigned int cpu, in cpu_map_shared_cache() argument
939 if (!last_level_cache_is_valid(cpu)) in cpu_map_shared_cache()
942 llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1); in cpu_map_shared_cache()
954 if (sibling == cpu || !last_level_cache_is_valid(sibling)) in cpu_map_shared_cache()
975 static void update_per_cpu_data_slice_size_cpu(unsigned int cpu) in update_per_cpu_data_slice_size_cpu() argument
981 if (!last_level_cache_is_valid(cpu)) in update_per_cpu_data_slice_size_cpu()
984 ci = ci_cacheinfo(cpu); in update_per_cpu_data_slice_size_cpu()
985 llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1); in update_per_cpu_data_slice_size_cpu()
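
update_per_cpu_data_slice_size_cpu() derives each CPU's "slice" of the last-level cache: the LLC size divided by the number of CPUs currently sharing it, i.e. the weight of the LLC's shared_cpu_map. Consumers in mm use the value to scale per-CPU batching. The arithmetic, with data_slice_size() as a hypothetical name for the sketch:

    #include <stdio.h>

    /* slice = LLC size / number of CPUs sharing that LLC */
    static unsigned int data_slice_size(unsigned int llc_size,
                                        unsigned int nr_sharing)
    {
        return nr_sharing ? llc_size / nr_sharing : 0;
    }

    int main(void)
    {
        /* e.g. a 32 MiB LLC shared by 8 CPUs -> 4 MiB per CPU */
        printf("%u KiB\n", data_slice_size(32768, 8));  /* 4096 */
        return 0;
    }
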
995 static void update_per_cpu_data_slice_size(bool cpu_online, unsigned int cpu, in update_per_cpu_data_slice_size() argument
1001 if (!cpu_online && icpu == cpu) in update_per_cpu_data_slice_size()
1008 static int cacheinfo_cpu_online(unsigned int cpu) in cacheinfo_cpu_online() argument
1010 int rc = detect_cache_attributes(cpu); in cacheinfo_cpu_online()
1015 rc = cache_add_dev(cpu); in cacheinfo_cpu_online()
1018 if (cpu_map_shared_cache(true, cpu, &cpu_map)) in cacheinfo_cpu_online()
1019 update_per_cpu_data_slice_size(true, cpu, cpu_map); in cacheinfo_cpu_online()
1022 free_cache_attributes(cpu); in cacheinfo_cpu_online()
1026 static int cacheinfo_cpu_pre_down(unsigned int cpu) in cacheinfo_cpu_pre_down() argument
1031 nr_shared = cpu_map_shared_cache(false, cpu, &cpu_map); in cacheinfo_cpu_pre_down()
1032 if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map)) in cacheinfo_cpu_pre_down()
1033 cpu_cache_sysfs_exit(cpu); in cacheinfo_cpu_pre_down()
1035 free_cache_attributes(cpu); in cacheinfo_cpu_pre_down()
1037 update_per_cpu_data_slice_size(false, cpu, cpu_map); in cacheinfo_cpu_pre_down()
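
cacheinfo_cpu_online() and cacheinfo_cpu_pre_down() are the CPU-hotplug pair: online detects attributes, adds the sysfs tree, and recomputes slice sizes if the set of LLC-sharing CPUs changed; pre_down snapshots that set first, then tears down sysfs, frees the attributes, and recomputes slices for the CPUs that remain. A model of the pairing; the kernel registers these through the cpuhp state machine, and the stubs below (llc_sharers_changed(), update_slice_sizes()) are made-up stand-ins for cpu_map_shared_cache() and update_per_cpu_data_slice_size():

    #include <stdbool.h>
    #include <stdio.h>

    static int  detect_cache_attributes(unsigned int cpu) { (void)cpu; return 0; }
    static int  cache_add_dev(unsigned int cpu)           { (void)cpu; return 0; }
    static void cpu_cache_sysfs_exit(unsigned int cpu)    { (void)cpu; }
    static void free_cache_attributes(unsigned int cpu)   { (void)cpu; }
    static bool llc_sharers_changed(unsigned int cpu)     { (void)cpu; return true; }
    static void update_slice_sizes(void)                  { puts("  slices recomputed"); }

    static int cacheinfo_cpu_online(unsigned int cpu)
    {
        int rc = detect_cache_attributes(cpu);

        if (rc)
            return rc;
        rc = cache_add_dev(cpu);
        if (rc) {
            free_cache_attributes(cpu);     /* unwind detection */
            return rc;
        }
        if (llc_sharers_changed(cpu))       /* cpu_map_shared_cache(true, ...) */
            update_slice_sizes();
        return 0;
    }

    static int cacheinfo_cpu_pre_down(unsigned int cpu)
    {
        bool changed = llc_sharers_changed(cpu);  /* snapshot before teardown */

        cpu_cache_sysfs_exit(cpu);          /* mirror of cache_add_dev() */
        free_cache_attributes(cpu);         /* mirror of detection */
        if (changed)
            update_slice_sizes();           /* for the CPUs that remain */
        return 0;
    }

    int main(void)
    {
        puts("online cpu1:");  cacheinfo_cpu_online(1);
        puts("offline cpu1:"); cacheinfo_cpu_pre_down(1);
        return 0;
    }
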