/linux/drivers/irqchip/

  irq-bcm6345-l1.c   (all matches in bcm6345_l1_set_affinity)
    194  unsigned int new_cpu;   (local)
    198  new_cpu = cpumask_first_and_and(&intc->cpumask, dest, cpu_online_mask);
    199  if (new_cpu >= nr_cpu_ids)
    202  dest = cpumask_of(new_cpu);
    205  if (old_cpu != new_cpu) {
    217  irq_data_update_effective_affinity(d, cpumask_of(new_cpu));
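
The pattern here is the usual affinity-setting dance: intersect the controller's reachable CPUs with the requested mask and the online mask, take the first hit, and give up if the intersection is empty (cpumask_first_and_and() returns nr_cpu_ids or more in that case). A minimal userspace sketch of that selection, with cpumasks modeled as plain bitmasks; all names below are illustrative, not the kernel API:

    #include <stdio.h>

    #define NR_CPUS 8                 /* model: one bit per CPU */

    /* Model of cpumask_first_and_and(): first bit set in all three
     * masks, or NR_CPUS if the intersection is empty. */
    static unsigned int first_and_and(unsigned long a, unsigned long b,
                                      unsigned long c)
    {
        unsigned long mask = a & b & c;
        for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
            if (mask & (1UL << cpu))
                return cpu;
        return NR_CPUS;
    }

    int main(void)
    {
        unsigned long intc_mask = 0x0F;   /* CPUs the controller can target */
        unsigned long dest      = 0x0C;   /* affinity requested by the user */
        unsigned long online    = 0x0B;   /* currently online CPUs */

        unsigned int new_cpu = first_and_and(intc_mask, dest, online);
        if (new_cpu >= NR_CPUS) {
            puts("no usable CPU in the requested mask");
            return 1;
        }
        printf("route interrupt to CPU %u\n", new_cpu);  /* CPU 3 here */
        return 0;
    }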

/linux/tools/perf/scripts/python/

  sched-migration.py
    191  def migrate(self, ts_list, new, old_cpu, new_cpu):   (argument)
    192      if old_cpu == new_cpu:
    199      new_rq = self.prev.rqs[new_cpu]
    201      self.rqs[new_cpu] = in_rq
    208      self.event_cpus.append(new_cpu)
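
The script's migrate() is a no-op when source and destination match, and otherwise moves the task's accounting from one per-CPU runqueue snapshot to the other. A bare-bones C model of that guard-and-move, with runqueues reduced to counters (purely illustrative):

    #include <stdio.h>

    #define NR_CPUS 4

    /* Toy per-CPU run queues: just a count of runnable tasks. */
    static int rq_len[NR_CPUS];

    /* Same shape as the script's migrate(): nothing to do if the task
     * stays put, otherwise account it off the old CPU, onto the new. */
    static void migrate(int old_cpu, int new_cpu)
    {
        if (old_cpu == new_cpu)       /* the guard at line 192 */
            return;
        rq_len[old_cpu]--;
        rq_len[new_cpu]++;
    }

    int main(void)
    {
        rq_len[0] = 2;
        migrate(0, 3);
        printf("cpu0=%d cpu3=%d\n", rq_len[0], rq_len[3]);  /* cpu0=1 cpu3=1 */
        return 0;
    }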

/linux/drivers/hv/

  hyperv_vmbus.h   (all matches in hv_update_allocated_cpus)
    448  unsigned int new_cpu)   (argument)
    450  hv_set_allocated_cpu(new_cpu);
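
The helper only records the channel's new home in the set of allocated CPUs; nothing in this snippet clears the old one. A sketch of that bookkeeping with the allocated set modeled as a bitmap (function names are stand-ins for the hv_* helpers, not the real API):

    #include <stdio.h>

    static unsigned long allocated_cpus;      /* one bit per CPU, CPU < 64 */

    static void set_allocated_cpu(unsigned int cpu)
    {
        allocated_cpus |= 1UL << cpu;
    }

    /* Mirrors the shape of hv_update_allocated_cpus(): only the new CPU
     * is recorded; the old bit is left alone in this snippet. */
    static void update_allocated_cpus(unsigned int old_cpu,
                                      unsigned int new_cpu)
    {
        (void)old_cpu;
        set_allocated_cpu(new_cpu);
    }

    int main(void)
    {
        update_allocated_cpus(0, 2);
        printf("allocated mask = %#lx\n", allocated_cpus);  /* 0x4 */
        return 0;
    }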

/linux/arch/x86/events/intel/

  uncore.c
    in uncore_change_type_ctx():
      1458  int new_cpu)   (argument)
      1464  die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);
      1473  box->cpu = new_cpu;
      1474  cpumask_set_cpu(new_cpu, &pmu->cpu_mask);
      1482  if (new_cpu < 0)
      1488  perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
      1489  box->cpu = new_cpu;
      1490  cpumask_set_cpu(new_cpu, &pmu->cpu_mask);
    in uncore_change_context():
      1495  int old_cpu, int new_cpu)   (argument)
      1498  uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
    [all...]
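
uncore keeps one owner CPU per die for each PMU box; on hotplug a box is either claimed for the first time (old_cpu < 0), released (new_cpu < 0), or handed over, with perf_pmu_migrate_context() moving any live events. A userspace model of that three-way logic (the migrate call is stubbed and the struct is simplified):

    #include <stdio.h>

    struct box {
        int cpu;                  /* CPU currently driving this box, -1 if none */
    };

    /* Stub for perf_pmu_migrate_context(): the real call moves events. */
    static void migrate_context(int old_cpu, int new_cpu)
    {
        printf("migrate events: CPU %d -> CPU %d\n", old_cpu, new_cpu);
    }

    /* Shape of uncore_change_type_ctx(): claim the box on first bring-up,
     * release it when the owner disappears, hand it over otherwise. */
    static void change_ctx(struct box *box, int old_cpu, int new_cpu)
    {
        if (old_cpu < 0) {        /* first online CPU of the die claims it */
            box->cpu = new_cpu;
            return;
        }
        if (new_cpu < 0) {        /* owner went offline, no replacement */
            box->cpu = -1;
            return;
        }
        migrate_context(old_cpu, new_cpu);
        box->cpu = new_cpu;
    }

    int main(void)
    {
        struct box b = { .cpu = -1 };
        change_ctx(&b, -1, 0);    /* CPU 0 comes up, claims the box */
        change_ctx(&b, 0, 2);     /* CPU 0 goes down, CPU 2 takes over */
        printf("box owner: CPU %d\n", b.cpu);
        return 0;
    }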

/linux/arch/arm64/kvm/vgic/

  vgic.c   (all matches in vgic_prune_ap_list)
    707  struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;   (local)
    711  list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
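
Retargeting here is a plain intrusive-list move: unhook the IRQ from one vCPU's ap_list and list_add_tail() it onto another's. The same move with a minimal circular doubly-linked list, modeled after (but not identical to) the kernel's list_head:

    #include <stdio.h>

    struct list_head { struct list_head *prev, *next; };

    static void list_init(struct list_head *h) { h->prev = h->next = h; }

    static void list_del(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
    }

    static void list_add_tail(struct list_head *e, struct list_head *head)
    {
        e->prev = head->prev;
        e->next = head;
        head->prev->next = e;
        head->prev = e;
    }

    struct irq { int id; struct list_head ap_list; };

    int main(void)
    {
        struct list_head old_vcpu, new_vcpu;
        struct irq irq = { .id = 27 };

        list_init(&old_vcpu);
        list_init(&new_vcpu);
        list_add_tail(&irq.ap_list, &old_vcpu);

        /* The move vgic_prune_ap_list() performs: off one vCPU's list,
         * onto the target vCPU's. */
        list_del(&irq.ap_list);
        list_add_tail(&irq.ap_list, &new_vcpu);

        printf("irq %d now on new vCPU: %s\n", irq.id,
               new_vcpu.next == &irq.ap_list ? "yes" : "no");
        return 0;
    }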

/linux/arch/powerpc/perf/

  imc-pmu.c   (all matches in nest_change_cpu_context)
    344  static void nest_change_cpu_context(int old_cpu, int new_cpu)   (argument)
    348  if (old_cpu < 0 || new_cpu < 0)
    352  perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
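
Same perf_pmu_migrate_context() pattern as the uncore driver, with an up-front guard: if either CPU index is invalid there is nothing to hand over. A sketch of the guard plus the loop over nest PMUs (the PMU names are invented, the migrate call is stubbed):

    #include <stdio.h>

    #define NR_PMUS 2

    static void migrate_context(const char *pmu, int old_cpu, int new_cpu)
    {
        printf("%s: events move CPU %d -> CPU %d\n", pmu, old_cpu, new_cpu);
    }

    /* Shape of nest_change_cpu_context(): bail out unless both CPUs are
     * valid, then migrate every registered nest PMU. */
    static void nest_change_cpu_context(int old_cpu, int new_cpu)
    {
        static const char *pmus[NR_PMUS] = { "nest_unit0", "nest_unit1" };

        if (old_cpu < 0 || new_cpu < 0)
            return;
        for (int i = 0; i < NR_PMUS; i++)
            migrate_context(pmus[i], old_cpu, new_cpu);
    }

    int main(void)
    {
        nest_change_cpu_context(-1, 3);   /* ignored: no previous owner */
        nest_change_cpu_context(1, 3);    /* both PMUs migrate */
        return 0;
    }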

/linux/kernel/sched/

  fair.c
    in update_scan_period():
      3625  static void update_scan_period(struct task_struct *p, int new_cpu)   (argument)
      3628  int dst_nid = cpu_to_node(new_cpu);
      3673  static inline void update_scan_period(struct task_struct *p, int new_cpu)   (argument)
    in sched_balance_find_dst_cpu():
      7397  int new_cpu = cpu;   (local)
      7425  new_cpu = sched_balance_find_dst_group_cpu(group, p, cpu);
      7426  if (new_cpu == cpu) {
      7432  /* Now try balancing at a lower domain level of 'new_cpu': */
      7433  cpu = new_cpu;
      7444  return new_cpu;
    in select_task_rq_fair():
      8479  int new_cpu ...   (local)
    in migrate_task_rq_fair():
      8548  migrate_task_rq_fair(struct task_struct *p, int new_cpu)   (argument)
    [all...]
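
sched_balance_find_dst_cpu() descends the sched-domain hierarchy: each level proposes a candidate CPU; if the candidate equals the current CPU the search is done, otherwise it restarts one level lower, centered on the new CPU (the comment at line 7432 says exactly that). A toy model of the descent, with the per-level chooser stubbed by a fixed table; the real code scores groups by load and walks sd pointers rather than level indices:

    #include <stdio.h>

    #define LEVELS 3

    /* Stub for sched_balance_find_dst_group_cpu(): per-level candidate.
     * This table is invented purely for illustration. */
    static int best_cpu_at(int level, int cpu)
    {
        static const int pick[LEVELS] = { 4, 4, 2 };  /* index = level */
        (void)cpu;
        return pick[level];
    }

    static int find_dst_cpu(int cpu)
    {
        int new_cpu = cpu;

        for (int level = LEVELS - 1; level >= 0; level--) {
            new_cpu = best_cpu_at(level, cpu);
            if (new_cpu == cpu)       /* no better CPU at this level: done */
                break;
            cpu = new_cpu;            /* descend, centered on the new CPU */
        }
        return new_cpu;
    }

    int main(void)
    {
        /* Top level picks 2, next picks 4, lowest confirms 4: stop. */
        printf("dst cpu = %d\n", find_dst_cpu(0));
        return 0;
    }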

  sched.h
    2400  void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
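
migrate_task_rq is an optional per-class hook in struct sched_class: the core invokes it only when a scheduling class provides one, so classes with no migration bookkeeping leave it NULL. The ops-table pattern in miniature; set_task_cpu() here is a simplified stand-in for the core's call site:

    #include <stdio.h>
    #include <stddef.h>

    struct task_struct { const char *comm; };

    /* Per-class ops table, after struct sched_class: hooks may be NULL. */
    struct sched_class_ops {
        void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
    };

    static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
    {
        printf("fair: %s migrating to CPU %d\n", p->comm, new_cpu);
    }

    static const struct sched_class_ops fair_ops = {
        .migrate_task_rq = migrate_task_rq_fair,
    };
    static const struct sched_class_ops idle_ops = { NULL };  /* no hook */

    static void set_task_cpu(const struct sched_class_ops *ops,
                             struct task_struct *p, int new_cpu)
    {
        if (ops->migrate_task_rq)   /* optional: call only if implemented */
            ops->migrate_task_rq(p, new_cpu);
    }

    int main(void)
    {
        struct task_struct p = { .comm = "worker" };
        set_task_cpu(&fair_ops, &p, 5);
        set_task_cpu(&idle_ops, &p, 5);   /* silently skipped */
        return 0;
    }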

  deadline.c   (match in migrate_task_rq_dl)
    2235  static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
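
The __maybe_unused on new_cpu says the parameter exists only to satisfy the shared migrate_task_rq signature and may go unread. The kernel's macro expands to the same GCC/Clang attribute spelled out below; the body here is a placeholder, not the deadline class's real logic:

    #include <stdio.h>

    /* Userspace spelling of the kernel's __maybe_unused. */
    #define __maybe_unused __attribute__((unused))

    /* Must take new_cpu to fit the shared hook signature, even though
     * this implementation never reads it. */
    static void migrate_task_rq_dl(int new_cpu __maybe_unused)
    {
        puts("deadline: per-class migration bookkeeping goes here");
    }

    int main(void)
    {
        migrate_task_rq_dl(3);
        return 0;
    }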

/linux/drivers/scsi/lpfc/

  lpfc_init.c   (all matches in lpfc_cpu_affinity_check)
    12437  int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;   (local)
    12501  new_cpu = start_cpu;
    12503  new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
    12508  new_cpu = lpfc_next_present_cpu(new_cpu);
    12520  start_cpu = lpfc_next_present_cpu(new_cpu);
    12526  cpu, cpup->eq, new_cpu,
    12547  new_cpu = start_cpu;
    12549  new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
    12553  new_cpu ...
    [all...]
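
The lpfc queue-assignment loops walk CPU ids with lpfc_next_present_cpu(), which wraps past the end of the mask back to the beginning, so starting from start_cpu every present CPU is considered exactly once. A model of that wrap-around walk; the mask and helper are simplified stand-ins:

    #include <stdio.h>

    #define NR_CPUS 8
    static const unsigned long present_mask = 0xB5;  /* CPUs 0,2,4,5,7 */

    /* Model of lpfc_next_present_cpu(): next present CPU after 'cpu',
     * wrapping to the lowest present CPU at the end of the mask. */
    static int next_present_cpu(int cpu)
    {
        for (int i = 1; i <= NR_CPUS; i++) {
            int candidate = (cpu + i) % NR_CPUS;
            if (present_mask & (1UL << candidate))
                return candidate;
        }
        return -1;                /* unreachable if any CPU is present */
    }

    int main(void)
    {
        /* Visit every present CPU once, starting after CPU 4. */
        int start = 4, cpu = start;
        do {
            cpu = next_present_cpu(cpu);
            printf("consider CPU %d\n", cpu);   /* 5, 7, 0, 2, 4 */
        } while (cpu != start);
        return 0;
    }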

/linux/kernel/

  workqueue.c   (all matches in wq_select_unbound_cpu)
    2215  int new_cpu;   (local)
    2224  new_cpu = __this_cpu_read(wq_rr_cpu_last);
    2225  new_cpu = cpumask_next_and_wrap(new_cpu, wq_unbound_cpumask, cpu_online_mask);
    2226  if (unlikely(new_cpu >= nr_cpu_ids))
    2228  __this_cpu_write(wq_rr_cpu_last, new_cpu);
    2230  return new_cpu;
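
wq_select_unbound_cpu() spreads unbound work round-robin: it remembers the last pick in a per-CPU variable and takes the next CPU that is in both wq_unbound_cpumask and the online mask, wrapping at the end. A userspace model, with a plain static standing in for the per-CPU slot:

    #include <stdio.h>

    #define NR_CPUS 8

    /* Model of cpumask_next_and_wrap(): next bit set in (a & b) strictly
     * after 'cpu', wrapping around; NR_CPUS if the intersection is empty. */
    static int next_and_wrap(int cpu, unsigned long a, unsigned long b)
    {
        unsigned long mask = a & b;
        for (int i = 1; i <= NR_CPUS; i++) {
            int candidate = (cpu + i) % NR_CPUS;
            if (mask & (1UL << candidate))
                return candidate;
        }
        return NR_CPUS;
    }

    static int wq_rr_cpu_last = -1;   /* stand-in for the per-CPU variable */

    static int select_unbound_cpu(unsigned long unbound, unsigned long online)
    {
        int new_cpu = next_and_wrap(wq_rr_cpu_last, unbound, online);
        if (new_cpu >= NR_CPUS)
            return 0;                 /* fallback; real code keeps the caller's CPU */
        wq_rr_cpu_last = new_cpu;
        return new_cpu;
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++)   /* cycles 1, 2, 5, 1, 2 on this mask */
            printf("queue on CPU %d\n", select_unbound_cpu(0x26, 0x27));
        return 0;
    }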

/linux/tools/perf/

  builtin-sched.c   (all matches in map_switch_event)
    1648  bool new_cpu = false;   (local)
    1663  new_cpu = true;
    1765  if (sched->map.comp && new_cpu)
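
map_switch_event() flags the first time a CPU shows up so that, in compressed-map mode (sched->map.comp), the output can call out the newly visible CPU. A sketch of that first-seen flag; the output text is invented:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 8
    static bool seen[NR_CPUS];

    /* Shape of map_switch_event(): note a CPU's first appearance so the
     * compressed map output can react to the new column. */
    static void switch_event(int this_cpu, bool compressed)
    {
        bool new_cpu = false;

        if (!seen[this_cpu]) {
            seen[this_cpu] = true;
            new_cpu = true;
        }
        if (compressed && new_cpu)
            printf("CPU %d appeared in the map\n", this_cpu);
    }

    int main(void)
    {
        switch_event(2, true);   /* first time: noted */
        switch_event(2, true);   /* already known: nothing */
        return 0;
    }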