Lines matching refs: cpu (cross-reference hits from the powerpc SMP code). Each entry shows the source line number, the matching code, and the enclosing function; "argument" and "local" mark the role the identifier plays at that site. Hedged reconstructions of the surrounding functions follow each group of hits.
278 void smp_muxed_ipi_set_message(int cpu, int msg) in smp_muxed_ipi_set_message() argument
280 struct cpu_messages *info = &per_cpu(ipi_message, cpu); in smp_muxed_ipi_set_message()
290 void smp_muxed_ipi_message_pass(int cpu, int msg) in smp_muxed_ipi_message_pass() argument
292 smp_muxed_ipi_set_message(cpu, msg); in smp_muxed_ipi_message_pass()
298 smp_ops->cause_ipi(cpu); in smp_muxed_ipi_message_pass()
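
The two functions above are the muxed-IPI send side: a message type is first
recorded in a per-CPU mailbox, then the platform's cause_ipi() hook raises a
single hardware IPI that multiplexes all message types. A condensed sketch
assembled from the fragments above; the byte-per-message encoding of
info->messages is an assumption based on upstream smp.c, not visible in this
listing:

struct cpu_messages {
        long messages;                  /* current messages */
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);
        char *message = (char *)&info->messages;

        /* Order prior stores before anything the IPI handler loads. */
        smp_mb();
        WRITE_ONCE(message[msg], 1);
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
        smp_muxed_ipi_set_message(cpu, msg);

        /* cause_ipi hooks must order the message store before the IPI. */
        smp_ops->cause_ipi(cpu);
}
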
352 static inline void do_message_pass(int cpu, int msg) in do_message_pass() argument
355 smp_ops->message_pass(cpu, msg); in do_message_pass()
358 smp_muxed_ipi_message_pass(cpu, msg); in do_message_pass()
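
do_message_pass() at line 352 is the dispatcher everything below funnels
through: platforms providing a native message_pass() hook get it directly,
everything else falls back to the muxed path. A sketch consistent with the two
call sites shown (the CONFIG_PPC_SMP_MUXED_IPI guard is assumed from upstream):

static inline void do_message_pass(int cpu, int msg)
{
        if (smp_ops->message_pass)
                smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
        else
                smp_muxed_ipi_message_pass(cpu, msg);
#endif
}
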
362 void arch_smp_send_reschedule(int cpu) in arch_smp_send_reschedule() argument
365 do_message_pass(cpu, PPC_MSG_RESCHEDULE); in arch_smp_send_reschedule()
369 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
371 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION); in arch_send_call_function_single_ipi()
376 unsigned int cpu; in arch_send_call_function_ipi_mask() local
378 for_each_cpu(cpu, mask) in arch_send_call_function_ipi_mask()
379 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION); in arch_send_call_function_ipi_mask()
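
The senders at lines 362–379 are thin wrappers: each maps a generic SMP event
(reschedule, single-CPU call-function, call-function to a mask) onto a
PPC_MSG_* type and hands it to do_message_pass() per destination CPU.
Reconstructed from the fragments above:

void arch_send_call_function_single_ipi(int cpu)
{
        do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
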
470 static void do_smp_send_nmi_ipi(int cpu, bool safe) in do_smp_send_nmi_ipi() argument
472 if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu)) in do_smp_send_nmi_ipi()
475 if (cpu >= 0) { in do_smp_send_nmi_ipi()
476 do_message_pass(cpu, PPC_MSG_NMI_IPI); in do_smp_send_nmi_ipi()
494 static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), in __smp_send_nmi_ipi() argument
501 BUG_ON(cpu == me); in __smp_send_nmi_ipi()
502 BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS); in __smp_send_nmi_ipi()
518 if (cpu < 0) { in __smp_send_nmi_ipi()
523 cpumask_set_cpu(cpu, &nmi_ipi_pending_mask); in __smp_send_nmi_ipi()
530 do_smp_send_nmi_ipi(cpu, safe); in __smp_send_nmi_ipi()
559 int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) in smp_send_nmi_ipi() argument
561 return __smp_send_nmi_ipi(cpu, fn, delay_us, false); in smp_send_nmi_ipi()
564 int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) in smp_send_safe_nmi_ipi() argument
566 return __smp_send_nmi_ipi(cpu, fn, delay_us, true); in smp_send_safe_nmi_ipi()
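
The NMI-IPI path at lines 470–566 first tries the platform's true NMI hook and
only then degrades to an ordinary PPC_MSG_NMI_IPI message; a negative cpu
(NMI_IPI_ALL_OTHERS, per the BUG_ON at line 502) fans out to every other
online CPU. A sketch of the send helper, with the fan-out loop assumed from
upstream:

static void do_smp_send_nmi_ipi(int cpu, bool safe)
{
        /* Prefer a real NMI if the platform can deliver one. */
        if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
                return;

        if (cpu >= 0) {
                do_message_pass(cpu, PPC_MSG_NMI_IPI);
        } else {
                int c;

                for_each_online_cpu(c) {
                        if (c == raw_smp_processor_id())
                                continue;
                        do_message_pass(c, PPC_MSG_NMI_IPI);
                }
        }
}
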
573 unsigned int cpu; in tick_broadcast() local
575 for_each_cpu(cpu, mask) in tick_broadcast()
576 do_message_pass(cpu, PPC_MSG_TICK_BROADCAST); in tick_broadcast()
595 int cpu; in crash_send_ipi() local
599 for_each_present_cpu(cpu) { in crash_send_ipi()
600 if (cpu_online(cpu)) in crash_send_ipi()
611 do_smp_send_nmi_ipi(cpu, false); in crash_send_ipi()
869 static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg) in get_cpu_thread_group_start() argument
871 int hw_cpu_id = get_hard_smp_processor_id(cpu); in get_cpu_thread_group_start()
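
get_cpu_thread_group_start() at line 869 translates the logical CPU to its
hardware id and scans the firmware-described thread_groups table for the group
containing it, returning that group's first index (or -1). A sketch assuming
the upstream struct thread_groups layout (nr_groups, threads_per_group,
thread_list[]):

static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
{
        int hw_cpu_id = get_hard_smp_processor_id(cpu);
        int i, j;

        for (i = 0; i < tg->nr_groups; i++) {
                int group_start = i * tg->threads_per_group;

                for (j = 0; j < tg->threads_per_group; j++) {
                        int idx = group_start + j;

                        if (tg->thread_list[idx] == hw_cpu_id)
                                return group_start;
                }
        }

        return -1;
}
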
888 static struct thread_groups *__init get_thread_groups(int cpu, in get_thread_groups() argument
892 struct device_node *dn = of_get_cpu_node(cpu, NULL); in get_thread_groups()
893 struct thread_groups_list *cpu_tgl = &tgl[cpu]; in get_thread_groups()
924 int cpu, int cpu_group_start) in update_mask_from_threadgroup() argument
926 int first_thread = cpu_first_thread_sibling(cpu); in update_mask_from_threadgroup()
929 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu)); in update_mask_from_threadgroup()
946 static int __init init_thread_group_cache_map(int cpu, int cache_property) in init_thread_group_cache_map() argument
957 tg = get_thread_groups(cpu, cache_property, &err); in init_thread_group_cache_map()
962 cpu_group_start = get_cpu_thread_group_start(cpu, tg); in init_thread_group_cache_map()
970 mask = &per_cpu(thread_group_l1_cache_map, cpu); in init_thread_group_cache_map()
971 update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start); in init_thread_group_cache_map()
974 mask = &per_cpu(thread_group_l2_cache_map, cpu); in init_thread_group_cache_map()
975 update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start); in init_thread_group_cache_map()
976 mask = &per_cpu(thread_group_l3_cache_map, cpu); in init_thread_group_cache_map()
977 update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start); in init_thread_group_cache_map()
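
init_thread_group_cache_map() at lines 946–977 ties those helpers together:
fetch the device-tree thread groups for the requested cache property, locate
this CPU's group, then fill the matching per-CPU cache-sibling mask;
THREAD_GROUP_SHARE_L2_L3 populates both the L2 and L3 maps from the same group
data. A condensed sketch with error handling abbreviated:

static int __init init_thread_group_cache_map(int cpu, int cache_property)
{
        int cpu_group_start, err = 0;
        struct thread_groups *tg;
        cpumask_var_t *mask;

        tg = get_thread_groups(cpu, cache_property, &err);
        if (!tg)
                return err;

        cpu_group_start = get_cpu_thread_group_start(cpu, tg);
        if (WARN_ON_ONCE(cpu_group_start == -1))
                return -ENODATA;

        if (cache_property == THREAD_GROUP_SHARE_L1) {
                mask = &per_cpu(thread_group_l1_cache_map, cpu);
                update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
        } else if (cache_property == THREAD_GROUP_SHARE_L2_L3) {
                mask = &per_cpu(thread_group_l2_cache_map, cpu);
                update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
                mask = &per_cpu(thread_group_l3_cache_map, cpu);
                update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
        }

        return 0;
}
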
1033 static const struct cpumask *tl_cache_mask(struct sched_domain_topology_level *tl, int cpu) in tl_cache_mask() argument
1035 return per_cpu(cpu_l2_cache_map, cpu); in tl_cache_mask()
1039 static const struct cpumask *tl_smallcore_smt_mask(struct sched_domain_topology_level *tl, int cpu) in tl_smallcore_smt_mask() argument
1041 return cpu_smallcore_mask(cpu); in tl_smallcore_smt_mask()
1045 struct cpumask *cpu_coregroup_mask(int cpu) in cpu_coregroup_mask() argument
1047 return per_cpu(cpu_coregroup_map, cpu); in cpu_coregroup_mask()
1061 int cpu; in init_big_cores() local
1063 for_each_possible_cpu(cpu) { in init_big_cores()
1064 int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1); in init_big_cores()
1069 zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu), in init_big_cores()
1071 cpu_to_node(cpu)); in init_big_cores()
1076 for_each_possible_cpu(cpu) { in init_big_cores()
1077 int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3); in init_big_cores()
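
init_big_cores() at line 1061 runs that initialisation for every possible CPU,
first for L1 (the "small core" topology, whose mask is allocated in the same
pass) and then for L2/L3. A sketch; the has_big_cores and
thread_group_shares_l2/l3 flags are assumed from upstream and not visible in
this listing:

static bool __init init_big_cores(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);

                if (err)
                        return false;

                zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
                                        GFP_KERNEL,
                                        cpu_to_node(cpu));
        }

        has_big_cores = true;                   /* assumed flag, per upstream */

        for_each_possible_cpu(cpu) {
                int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3);

                if (err)
                        return false;
        }

        thread_group_shares_l2 = true;          /* assumed flags, per upstream */
        thread_group_shares_l3 = true;
        return true;
}
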
1095 const struct cpumask *cpu_die_mask(int cpu) in cpu_die_mask() argument
1098 return per_cpu(cpu_coregroup_map, cpu); in cpu_die_mask()
1100 return cpu_node_mask(cpu); in cpu_die_mask()
1104 int cpu_die_id(int cpu) in cpu_die_id() argument
1107 return cpu_to_coregroup_id(cpu); in cpu_die_id()
1115 unsigned int cpu, num_threads; in smp_prepare_cpus() local
1129 for_each_possible_cpu(cpu) { in smp_prepare_cpus()
1130 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), in smp_prepare_cpus()
1131 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
1132 zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu), in smp_prepare_cpus()
1133 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
1134 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), in smp_prepare_cpus()
1135 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
1137 zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu), in smp_prepare_cpus()
1138 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
1144 if (cpu_present(cpu)) { in smp_prepare_cpus()
1145 set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]); in smp_prepare_cpus()
1146 set_cpu_numa_mem(cpu, in smp_prepare_cpus()
1147 local_memory_node(numa_cpu_lookup_table[cpu])); in smp_prepare_cpus()
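
smp_prepare_cpus() at line 1115 allocates the per-CPU relation masks on each
CPU's home NUMA node and, for present CPUs, seeds the NUMA node and memory
associations before any secondary runs. The allocation loop, condensed from
lines 1129–1147 (the has_coregroup_support() guard is assumed from upstream):

for_each_possible_cpu(cpu) {
        zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
                                GFP_KERNEL, cpu_to_node(cpu));
        zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
                                GFP_KERNEL, cpu_to_node(cpu));
        zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
                                GFP_KERNEL, cpu_to_node(cpu));
        if (has_coregroup_support())            /* assumed guard */
                zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));

        if (cpu_present(cpu)) {
                set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
                set_cpu_numa_mem(cpu,
                        local_memory_node(numa_cpu_lookup_table[cpu]));
        }
}
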
1204 unsigned int cpu = smp_processor_id(); in generic_cpu_disable() local
1206 if (cpu == boot_cpuid) in generic_cpu_disable()
1209 set_cpu_online(cpu, false); in generic_cpu_disable()
1231 void generic_cpu_die(unsigned int cpu) in generic_cpu_die() argument
1237 if (is_cpu_dead(cpu)) in generic_cpu_die()
1241 printk(KERN_ERR "CPU%d didn't die...\n", cpu); in generic_cpu_die()
1244 void generic_set_cpu_dead(unsigned int cpu) in generic_set_cpu_dead() argument
1246 per_cpu(cpu_state, cpu) = CPU_DEAD; in generic_set_cpu_dead()
1254 void generic_set_cpu_up(unsigned int cpu) in generic_set_cpu_up() argument
1256 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; in generic_set_cpu_up()
1259 int generic_check_cpu_restart(unsigned int cpu) in generic_check_cpu_restart() argument
1261 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE; in generic_check_cpu_restart()
1264 int is_cpu_dead(unsigned int cpu) in is_cpu_dead() argument
1266 return per_cpu(cpu_state, cpu) == CPU_DEAD; in is_cpu_dead()
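
The generic_* hotplug helpers at lines 1204–1266 coordinate offlining through
a per-CPU cpu_state word: the dying CPU publishes CPU_DEAD via
generic_set_cpu_dead(), while the CPU running generic_cpu_die() polls for it
with a bounded sleep. A sketch of the poll loop, consistent with the printk at
line 1241 (iteration count and sleep interval assumed from upstream):

void generic_cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                smp_rmb();
                if (is_cpu_dead(cpu))
                        return;
                msleep(100);
        }
        printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}
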
1280 static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle) in cpu_idle_thread_init() argument
1283 paca_ptrs[cpu]->__current = idle; in cpu_idle_thread_init()
1284 paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) + in cpu_idle_thread_init()
1287 task_thread_info(idle)->cpu = cpu; in cpu_idle_thread_init()
1288 secondary_current = current_set[cpu] = idle; in cpu_idle_thread_init()
1291 int __cpu_up(unsigned int cpu, struct task_struct *tidle) in __cpu_up() argument
1304 cpu_thread_in_subcore(cpu)) in __cpu_up()
1308 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) in __cpu_up()
1311 cpu_idle_thread_init(cpu, tidle); in __cpu_up()
1318 rc = smp_ops->prepare_cpu(cpu); in __cpu_up()
1326 cpu_callin_map[cpu] = 0; in __cpu_up()
1335 DBG("smp: kicking cpu %d\n", cpu); in __cpu_up()
1336 rc = smp_ops->kick_cpu(cpu); in __cpu_up()
1338 pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc); in __cpu_up()
1350 spin_until_cond(cpu_callin_map[cpu] || time_is_before_jiffies(deadline)); in __cpu_up()
1352 if (!cpu_callin_map[cpu] && system_state >= SYSTEM_RUNNING) { in __cpu_up()
1357 while (!cpu_callin_map[cpu] && time_is_after_jiffies(deadline)) in __cpu_up()
1361 if (!cpu_callin_map[cpu]) { in __cpu_up()
1362 printk(KERN_ERR "Processor %u is stuck.\n", cpu); in __cpu_up()
1366 DBG("Processor %u found.\n", cpu); in __cpu_up()
1372 spin_until_cond(cpu_online(cpu)); in __cpu_up()
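
__cpu_up() at line 1291 stages a secondary online: initialise its idle-thread
state, clear its callin word, kick it via smp_ops->kick_cpu(), then wait for
cpu_callin_map[cpu] to flip, busy-waiting at boot and falling back to a
sleeping wait once the system is running. A condensed sketch of the handshake
(the timeout values are illustrative, not the kernel's):

/* Inside __cpu_up(), after cpu_idle_thread_init() and prepare_cpu(). */
cpu_callin_map[cpu] = 0;

rc = smp_ops->kick_cpu(cpu);
if (rc) {
        pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
        return rc;
}

deadline = jiffies + msecs_to_jiffies(1000);            /* illustrative */
spin_until_cond(cpu_callin_map[cpu] || time_is_before_jiffies(deadline));

if (!cpu_callin_map[cpu] && system_state >= SYSTEM_RUNNING) {
        deadline = jiffies + msecs_to_jiffies(5000);    /* illustrative */
        while (!cpu_callin_map[cpu] && time_is_after_jiffies(deadline))
                msleep(1);
}

if (!cpu_callin_map[cpu]) {
        printk(KERN_ERR "Processor %u is stuck.\n", cpu);
        return -ENOENT;
}

DBG("Processor %u found.\n", cpu);
/* Finally wait until the new CPU has marked itself online. */
spin_until_cond(cpu_online(cpu));
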
1380 int cpu_to_core_id(int cpu) in cpu_to_core_id() argument
1385 np = of_get_cpu_node(cpu, NULL); in cpu_to_core_id()
1397 int cpu_core_index_of_thread(int cpu) in cpu_core_index_of_thread() argument
1399 return cpu >> threads_shift; in cpu_core_index_of_thread()
1412 static struct device_node *cpu_to_l2cache(int cpu) in cpu_to_l2cache() argument
1417 if (!cpu_present(cpu)) in cpu_to_l2cache()
1420 np = of_get_cpu_node(cpu, NULL); in cpu_to_l2cache()
1431 static bool update_mask_by_l2(int cpu, cpumask_var_t *mask) in update_mask_by_l2() argument
1445 cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu)); in update_mask_by_l2()
1447 for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) { in update_mask_by_l2()
1449 set_cpus_related(i, cpu, cpu_l2_cache_mask); in update_mask_by_l2()
1453 if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) && in update_mask_by_l2()
1454 !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) { in update_mask_by_l2()
1456 cpu); in update_mask_by_l2()
1462 l2_cache = cpu_to_l2cache(cpu); in update_mask_by_l2()
1465 for_each_cpu(i, cpu_sibling_mask(cpu)) in update_mask_by_l2()
1466 set_cpus_related(cpu, i, cpu_l2_cache_mask); in update_mask_by_l2()
1471 cpumask_and(*mask, cpu_online_mask, cpu_node_mask(cpu)); in update_mask_by_l2()
1474 or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask); in update_mask_by_l2()
1477 cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu)); in update_mask_by_l2()
1488 or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask); in update_mask_by_l2()
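
update_mask_by_l2() at line 1431 prefers the device-tree thread-group data
when threads share L2: it copies thread_group_l2_cache_map into
cpu_l2_cache_mask for online CPUs and sanity-checks that the L1 siblings are a
subset of the L2 siblings (the warning at line 1456); only without that data
does it fall back to matching l2-cache device-tree nodes. A sketch of the
thread-group branch (the guarding flag is assumed from upstream):

/* submask_fn is cpu_sibling_mask, or cpu_smallcore_mask on big cores. */
if (thread_group_shares_l2) {                   /* assumed guard */
        cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));

        for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
                if (cpu_online(i))
                        set_cpus_related(i, cpu, cpu_l2_cache_mask);
        }

        /* L1 siblings should be a subset of L2 siblings. */
        if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) &&
            !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu)))
                pr_warn_once("CPU %d : Inconsistent L1 and L2 cache siblings\n",
                             cpu);

        return true;
}
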
1502 static void remove_cpu_from_masks(int cpu) in remove_cpu_from_masks() argument
1507 unmap_cpu_from_node(cpu); in remove_cpu_from_masks()
1512 for_each_cpu(i, mask_fn(cpu)) { in remove_cpu_from_masks()
1513 set_cpus_unrelated(cpu, i, cpu_l2_cache_mask); in remove_cpu_from_masks()
1514 set_cpus_unrelated(cpu, i, cpu_sibling_mask); in remove_cpu_from_masks()
1516 set_cpus_unrelated(cpu, i, cpu_smallcore_mask); in remove_cpu_from_masks()
1519 for_each_cpu(i, cpu_core_mask(cpu)) in remove_cpu_from_masks()
1520 set_cpus_unrelated(cpu, i, cpu_core_mask); in remove_cpu_from_masks()
1523 for_each_cpu(i, cpu_coregroup_mask(cpu)) in remove_cpu_from_masks()
1524 set_cpus_unrelated(cpu, i, cpu_coregroup_mask); in remove_cpu_from_masks()
1529 static inline void add_cpu_to_smallcore_masks(int cpu) in add_cpu_to_smallcore_masks() argument
1536 cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu)); in add_cpu_to_smallcore_masks()
1538 for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) { in add_cpu_to_smallcore_masks()
1540 set_cpus_related(i, cpu, cpu_smallcore_mask); in add_cpu_to_smallcore_masks()
1544 static void update_coregroup_mask(int cpu, cpumask_var_t *mask) in update_coregroup_mask() argument
1547 int coregroup_id = cpu_to_coregroup_id(cpu); in update_coregroup_mask()
1555 for_each_cpu(i, submask_fn(cpu)) in update_coregroup_mask()
1556 set_cpus_related(cpu, i, cpu_coregroup_mask); in update_coregroup_mask()
1561 cpumask_and(*mask, cpu_online_mask, cpu_node_mask(cpu)); in update_coregroup_mask()
1564 or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask); in update_coregroup_mask()
1567 cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu)); in update_coregroup_mask()
1572 or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask); in update_coregroup_mask()
1580 static void add_cpu_to_masks(int cpu) in add_cpu_to_masks() argument
1583 int first_thread = cpu_first_thread_sibling(cpu); in add_cpu_to_masks()
1593 map_cpu_to_node(cpu, cpu_to_node(cpu)); in add_cpu_to_masks()
1594 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); in add_cpu_to_masks()
1595 cpumask_set_cpu(cpu, cpu_core_mask(cpu)); in add_cpu_to_masks()
1599 set_cpus_related(i, cpu, cpu_sibling_mask); in add_cpu_to_masks()
1601 add_cpu_to_smallcore_masks(cpu); in add_cpu_to_masks()
1604 ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu)); in add_cpu_to_masks()
1605 update_mask_by_l2(cpu, &mask); in add_cpu_to_masks()
1608 update_coregroup_mask(cpu, &mask); in add_cpu_to_masks()
1611 chip_id = cpu_to_chip_id(cpu); in add_cpu_to_masks()
1617 or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask); in add_cpu_to_masks()
1620 cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu)); in add_cpu_to_masks()
1624 cpumask_and(mask, mask, cpu_node_mask(cpu)); in add_cpu_to_masks()
1628 or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask); in add_cpu_to_masks()
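
The tail of add_cpu_to_masks() at lines 1611–1628 widens cpu_core_mask by chip
id: starting from this CPU's submask (siblings, or the L2 mask when caches are
shared), it walks the remaining online CPUs and merges the submask of each one
on the same chip, pruning the scratch mask as whole cores are absorbed so each
core is visited once. A condensed sketch of that walk:

/* mask is the scratch cpumask allocated at line 1604. */
chip_id = cpu_to_chip_id(cpu);

/* Seed the core mask with this CPU's own submask. */
or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);

/* Candidates: online CPUs not already in our core mask... */
cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
/* ...restricted to this NUMA node when no usable chip id exists. */
if (chip_id == -1)
        cpumask_and(mask, mask, cpu_node_mask(cpu));

for_each_cpu(i, mask) {
        if (chip_id == cpu_to_chip_id(i)) {
                or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
                cpumask_andnot(mask, mask, submask_fn(i));
        } else {
                cpumask_andnot(mask, mask, cpu_core_mask(i));
        }
}
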
1642 unsigned int cpu = raw_smp_processor_id(); in start_secondary() local
1651 cpumask_set_cpu(cpu, mm_cpumask(&init_mm)); in start_secondary()
1654 smp_store_cpu_info(cpu); in start_secondary()
1656 rcutree_report_cpu_starting(cpu); in start_secondary()
1657 cpu_callin_map[cpu] = 1; in start_secondary()
1660 smp_ops->setup_cpu(cpu); in start_secondary()
1674 set_numa_node(numa_cpu_lookup_table[cpu]); in start_secondary()
1675 set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu])); in start_secondary()
1678 add_cpu_to_masks(cpu); in start_secondary()
1686 struct cpumask *mask = cpu_l2_cache_mask(cpu); in start_secondary()
1691 if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu))) in start_secondary()
1696 notify_cpu_starting(cpu); in start_secondary()
1697 set_cpu_online(cpu, true); in start_secondary()
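
start_secondary() at line 1642 is the same handshake seen from the new CPU's
side, and the ordering matters: publish per-CPU state and set
cpu_callin_map[cpu] first, join the topology masks, and only then
set_cpu_online(), which releases the final spin_until_cond() in __cpu_up().
The sequence, condensed from lines 1642–1697:

unsigned int cpu = raw_smp_processor_id();

cpumask_set_cpu(cpu, mm_cpumask(&init_mm));     /* borrow init_mm */
smp_store_cpu_info(cpu);
rcutree_report_cpu_starting(cpu);
cpu_callin_map[cpu] = 1;                        /* __cpu_up() sees us now */

if (smp_ops->setup_cpu)
        smp_ops->setup_cpu(cpu);

set_numa_node(numa_cpu_lookup_table[cpu]);
set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

add_cpu_to_masks(cpu);                          /* join the topology masks */

notify_cpu_starting(cpu);
set_cpu_online(cpu, true);                      /* unblocks __cpu_up() */
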
1767 int arch_asym_cpu_priority(int cpu) in arch_asym_cpu_priority() argument
1770 return -cpu / threads_per_core; in arch_asym_cpu_priority()
1772 return -cpu; in arch_asym_cpu_priority()
1778 int cpu = smp_processor_id(); in __cpu_disable() local
1791 remove_cpu_from_masks(cpu); in __cpu_disable()
1796 void __cpu_die(unsigned int cpu) in __cpu_die() argument
1802 VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(&init_mm))); in __cpu_die()
1804 cpumask_clear_cpu(cpu, mm_cpumask(&init_mm)); in __cpu_die()
1807 smp_ops->cpu_die(cpu); in __cpu_die()
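
Offlining ends in __cpu_die() at line 1796: the dead CPU still holds the
init_mm cpumask reference it took in start_secondary(); that reference is
dropped here before the platform's cpu_die() hook reclaims the thread.
Condensed from the fragments above:

void __cpu_die(unsigned int cpu)
{
        /* start_secondary() put us in init_mm's cpumask; drop that now. */
        VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(&init_mm)));
        cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));

        if (smp_ops->cpu_die)
                smp_ops->cpu_die(cpu);
}
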