| /linux/kernel/irq/ |
| H A D | cpuhotplug.c | 19 /* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */ 28 * which do not implement effective affinity, but the architecture has in irq_needs_fixup() 29 * enabled the config switch. Use the general affinity mask instead. in irq_needs_fixup() 45 pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n", in irq_needs_fixup() 58 const struct cpumask *affinity; in migrate_one_irq() local 64 * still in the radix tree. Also if the chip has no affinity setter, in migrate_one_irq() 100 * mask, so the last change of the affinity does not get lost. If in migrate_one_irq() 102 * any online CPU, use the current affinity mask. in migrate_one_irq() 105 affinity = irq_desc_get_pending_mask(desc); in migrate_one_irq() 107 affinity in migrate_one_irq() 207 const struct cpumask *affinity = irq_data_get_affinity_mask(data); irq_restore_affinity_of_irq() local [all …] |
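
The hits above centre on irq_needs_fixup(): after a CPU is unplugged, an interrupt only needs migrating if the outgoing CPU sat in its (effective) affinity mask, and the pr_warn() fires when every CPU left in that mask is already offline. A minimal sketch of that check using the generic cpumask helpers; this is illustrative, not the kernel's exact code, which also covers the !GENERIC_IRQ_EFFECTIVE_AFF_MASK fallback noted in the comment:

    #include <linux/cpumask.h>
    #include <linux/irq.h>
    #include <linux/smp.h>

    /* Sketch only: does @d target the CPU currently going down? */
    static bool needs_fixup_sketch(struct irq_data *d)
    {
            const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
            unsigned int cpu = smp_processor_id();  /* the outgoing CPU */

            /* Nothing to do if the interrupt never targeted this CPU. */
            if (!cpumask_test_cpu(cpu, m))
                    return false;

            /* The pr_warn() case above: no online CPU is left in the mask. */
            if (!cpumask_intersects(m, cpu_online_mask))
                    pr_warn("IRQ %u has only offline target CPUs\n", d->irq);

            return true;
    }
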
| H A D | irqdesc.c | 57 if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity, in alloc_masks() 64 free_cpumask_var(desc->irq_common_data.affinity); in alloc_masks() 74 free_cpumask_var(desc->irq_common_data.affinity); in alloc_masks() 86 static void desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) in desc_smp_init() argument 88 if (!affinity) in desc_smp_init() 89 affinity = irq_default_affinity; in desc_smp_init() 90 cpumask_copy(desc->irq_common_data.affinity, affinity); in desc_smp_init() 106 free_cpumask_var(desc->irq_common_data.affinity); in free_masks() 116 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { } in desc_smp_init() argument 121 const struct cpumask *affinity, struct module *owner) in desc_set_defaults() argument [all …]
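
alloc_masks() and desc_smp_init() above follow the standard cpumask_var_t lifecycle: allocate zeroed (NUMA-aware), seed from a caller mask or irq_default_affinity, free on teardown. The same pattern, sketched with illustrative names:

    #include <linux/cpumask.h>
    #include <linux/interrupt.h>    /* irq_default_affinity */
    #include <linux/slab.h>

    static cpumask_var_t my_mask;   /* illustrative, not a kernel symbol */

    static int my_mask_init(int node, const struct cpumask *affinity)
    {
            /* Zeroed, node-local allocation; a no-op on !CPUMASK_OFFSTACK. */
            if (!zalloc_cpumask_var_node(&my_mask, GFP_KERNEL, node))
                    return -ENOMEM;

            /* Fall back to the default affinity, as desc_smp_init() does. */
            if (!affinity)
                    affinity = irq_default_affinity;
            cpumask_copy(my_mask, affinity);
            return 0;
    }

    static void my_mask_exit(void)
    {
            free_cpumask_var(my_mask);
    }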
|
| H A D | manage.c | 293 cpumask_copy(desc->irq_common_data.affinity, mask); in irq_do_set_affinity() 355 cpumask_copy(desc->irq_common_data.affinity, mask); in irq_set_affinity_deactivated() 419 int irq_update_affinity_desc(unsigned int irq, struct irq_affinity_desc *affinity) in irq_update_affinity_desc() argument 447 if (affinity->is_managed) { in irq_update_affinity_desc() 452 cpumask_copy(desc->irq_common_data.affinity, &affinity->mask); in irq_update_affinity_desc() 532 cpumask_copy(cpumask, desc->irq_common_data.affinity); in irq_affinity_notify() 611 if (cpumask_intersects(desc->irq_common_data.affinity, in irq_setup_affinity() 613 set = desc->irq_common_data.affinity; in irq_setup_affinity() 1446 if (cpumask_intersects(old->affinity, new->affinity) || in valid_percpu_irqaction() 2421 if (cpumask_intersects(desc->percpu_enabled, action->affinity)) { in __free_percpu_irq() [all …]
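
manage.c is the setter side of all of this; from a driver, the usual entry point is irq_set_affinity(), which ends up in irq_do_set_affinity() above. A hedged usage sketch (the per-queue IRQ pairing is hypothetical):

    #include <linux/cpumask.h>
    #include <linux/interrupt.h>

    /* Sketch: steer one (hypothetical) per-queue interrupt to one CPU. */
    static int steer_queue_irq(unsigned int irq, unsigned int cpu)
    {
            if (!cpu_online(cpu))
                    return -EINVAL;

            return irq_set_affinity(irq, cpumask_of(cpu));
    }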
|
| H A D | irq_test.c | 120 struct irq_affinity_desc affinity = { in irq_shutdown_depth_test() local 128 virq = irq_test_setup_fake_irq(test, &affinity); in irq_shutdown_depth_test() 172 struct irq_affinity_desc affinity = { in irq_cpuhotplug_test() local 185 cpumask_copy(&affinity.mask, cpumask_of(1)); in irq_cpuhotplug_test() 187 virq = irq_test_setup_fake_irq(test, &affinity); in irq_cpuhotplug_test()
|
| H A D | irqdomain.c | 30 bool realloc, const struct irq_affinity_desc *affinity); 783 const struct irq_affinity_desc *affinity) in irq_create_mapping_affinity_locked() argument 792 affinity); in irq_create_mapping_affinity_locked() 822 const struct irq_affinity_desc *affinity) in irq_create_mapping_affinity() argument 843 virq = irq_create_mapping_affinity_locked(domain, hwirq, affinity); in irq_create_mapping_affinity() 1284 int node, const struct irq_affinity_desc *affinity) in irq_domain_alloc_descs() argument 1290 affinity); in irq_domain_alloc_descs() 1296 affinity); in irq_domain_alloc_descs() 1299 affinity); in irq_domain_alloc_descs() 1629 bool realloc, const struct irq_affinity_desc *affinity) in irq_domain_alloc_irqs_locked() argument [all …]
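
irq_create_mapping_affinity() above maps a hwirq into a domain while seeding the new descriptor's affinity from an irq_affinity_desc. A sketch, assuming a valid domain and hwirq:

    #include <linux/interrupt.h>
    #include <linux/irqdomain.h>

    /* Sketch: map @hwirq in @domain with its descriptor pre-pointed at @cpu. */
    static unsigned int map_with_affinity(struct irq_domain *domain,
                                          irq_hw_number_t hwirq,
                                          unsigned int cpu)
    {
            struct irq_affinity_desc ad = { .is_managed = 0 };

            cpumask_copy(&ad.mask, cpumask_of(cpu));
            return irq_create_mapping_affinity(domain, hwirq, &ad); /* 0 on failure */
    }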
|
| /linux/tools/testing/selftests/rseq/ |
| H A D | basic_test.c | 18 cpu_set_t affinity, test_affinity; in test_cpu_pointer() local 21 sched_getaffinity(0, sizeof(affinity), &affinity); in test_cpu_pointer() 24 if (CPU_ISSET(i, &affinity)) { in test_cpu_pointer() 39 sched_setaffinity(0, sizeof(affinity), &affinity); in test_cpu_pointer()
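
The rseq test saves its mask, pins itself to each allowed CPU in turn, and restores the original mask at the end. The same save/iterate/restore shape as a standalone glibc program (not the rseq test itself):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            cpu_set_t saved, one;
            int cpu;

            if (sched_getaffinity(0, sizeof(saved), &saved))
                    return 1;

            for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
                    if (!CPU_ISSET(cpu, &saved))
                            continue;
                    CPU_ZERO(&one);
                    CPU_SET(cpu, &one);
                    if (!sched_setaffinity(0, sizeof(one), &one))
                            printf("ran pinned to CPU %d\n", cpu);
            }

            /* Restore the original mask, as basic_test.c does. */
            return sched_setaffinity(0, sizeof(saved), &saved) ? 1 : 0;
    }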
|
| /linux/tools/perf/util/ |
| H A D | affinity.h | 8 struct affinity { struct 14 void affinity__cleanup(struct affinity *a); argument 15 void affinity__set(struct affinity *a, int cpu); 16 int affinity__setup(struct affinity *a);
|
| H A D | affinity.c | 25 int affinity__setup(struct affinity *a) in affinity__setup() 49 void affinity__set(struct affinity *a, int cpu) in affinity__set() 72 static void __affinity__cleanup(struct affinity *a) in __affinity__cleanup() 82 void affinity__cleanup(struct affinity *a) in affinity__cleanup()
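
perf's helper exists so per-CPU ring buffers are touched from a CPU close to them: setup snapshots the thread's mask, set() migrates towards a CPU, cleanup restores. The intended calling convention, sketched from the interface in affinity.h (perf-internal API, error handling trimmed):

    #include "affinity.h"   /* tools/perf/util, not a public library header */

    /* Sketch: visit per-CPU buffers from a nearby CPU, then restore. */
    static int process_buffers(const int *cpus, int nr)
    {
            struct affinity a;
            int i;

            if (affinity__setup(&a) < 0)
                    return -1;

            for (i = 0; i < nr; i++) {
                    affinity__set(&a, cpus[i]); /* migrate towards cpus[i] */
                    /* ... touch the per-CPU data for cpus[i] here ... */
            }

            affinity__cleanup(&a);  /* put the original mask back */
            return 0;
    }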
|
| H A D | mmap.c | 98 static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, int affinity) in perf_mmap__aio_bind() argument 106 if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) { in perf_mmap__aio_bind() 142 struct perf_cpu cpu __maybe_unused, int affinity __maybe_unused) in perf_mmap__aio_bind() 176 ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity); in perf_mmap__aio_mmap() 269 if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) in perf_mmap__setup_affinity_mask() 271 else if (mp->affinity == PERF_AFFINITY_CPU) in perf_mmap__setup_affinity_mask() 285 if (mp->affinity != PERF_AFFINITY_SYS && in mmap__mmap()
|
| /linux/Documentation/arch/arm64/ |
| H A D | asymmetric-32bit.rst | 51 CPU affinity. 68 On a homogeneous system, the CPU affinity of a task is preserved across 71 affinity mask contains 64-bit-only CPUs. In this situation, the kernel 72 determines the new affinity mask as follows: 74 1. If the 32-bit-capable subset of the affinity mask is not empty, 75 then the affinity is restricted to that subset and the old affinity 84 affinity of the task is then changed to match the 32-bit-capable 87 3. On failure (i.e. out of memory), the affinity is changed to the set 91 invalidate the affinity mask saved in (1) and attempt to restore the CPU 92 affinity of the task using the saved mask if it was previously valid. [all …]
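
Step (1) of the list above is a plain mask intersection. A hedged sketch of just that step with the generic cpumask helpers; system_32bit_el0_cpumask() is arm64's accessor for the 32-bit-capable set, the rest is illustrative rather than the scheduler's actual code path:

    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/sched.h>
    #include <asm/cpufeature.h>     /* system_32bit_el0_cpumask(), arm64 */

    /* Sketch of step (1): restrict @p to its 32-bit-capable CPUs, if any. */
    static int restrict_to_32bit(struct task_struct *p)
    {
            cpumask_var_t new_mask;
            int ret;

            if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
                    return -ENOMEM; /* step (3): out of memory */

            /* cpumask_and() returns false when the intersection is empty. */
            if (cpumask_and(new_mask, p->cpus_ptr, system_32bit_el0_cpumask()))
                    ret = set_cpus_allowed_ptr(p, new_mask);
            else
                    ret = -EINVAL;  /* empty subset: see step (2) above */

            free_cpumask_var(new_mask);
            return ret;
    }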
|
| /linux/tools/virtio/ringtest/ |
| H A D | run-on-all.sh | 20 "$@" --host-affinity $HOST_AFFINITY --guest-affinity $cpu 24 "$@" --host-affinity $HOST_AFFINITY
|
| /linux/drivers/perf/ |
| H A D | arm_pmu_platform.c | 46 const struct cpumask *affinity) in pmu_parse_percpu_irq() argument 51 cpumask_copy(&pmu->supported_cpus, affinity); in pmu_parse_percpu_irq() 117 const struct cpumask *affinity; in pmu_parse_irqs() local 120 irq = platform_get_irq_affinity(pdev, 0, &affinity); in pmu_parse_irqs() 122 return pmu_parse_percpu_irq(pmu, irq, affinity); in pmu_parse_irqs()
|
| H A D | arm_pmu.c | 29 static int armpmu_count_irq_users(const struct cpumask *affinity, 587 static int armpmu_count_irq_users(const struct cpumask *affinity, const int irq) in armpmu_count_irq_users() argument 591 for_each_cpu(cpu, affinity) { in armpmu_count_irq_users() 600 armpmu_find_irq_ops(const struct cpumask *affinity, int irq) in armpmu_find_irq_ops() argument 605 for_each_cpu(cpu, affinity) { in armpmu_find_irq_ops() 634 const struct cpumask *affinity = *armpmu ? &(*armpmu)->supported_cpus : in armpmu_request_irq() local 668 } else if (armpmu_count_irq_users(affinity, irq) == 0) { in armpmu_request_irq() 669 err = request_percpu_nmi(irq, handler, "arm-pmu", affinity, pcpu_armpmu); in armpmu_request_irq() 674 affinity, pcpu_armpmu); in armpmu_request_irq() 682 irq_ops = armpmu_find_irq_ops(affinity, irq); in armpmu_request_irq()
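
armpmu_count_irq_users() above is a straightforward walk of the affinity mask, counting CPUs whose per-CPU IRQ slot already holds @irq. The shape of that loop, sketched with an illustrative per-CPU variable:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(int, sketch_cpu_irq);     /* illustrative slot */

    static int count_irq_users_sketch(const struct cpumask *affinity, int irq)
    {
            int cpu, count = 0;

            /* Only the CPUs this PMU supports are of interest. */
            for_each_cpu(cpu, affinity)
                    if (per_cpu(sketch_cpu_irq, cpu) == irq)
                            count++;

            return count;
    }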
|
| /linux/kernel/ |
| H A D | kthread.c | 353 cpumask_var_t affinity; in kthread_affine_node() local 358 if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) { in kthread_affine_node() 373 kthread_fetch_affinity(kthread, affinity); in kthread_affine_node() 374 set_cpus_allowed_ptr(current, affinity); in kthread_affine_node() 377 free_cpumask_var(affinity); in kthread_affine_node() 842 cpumask_var_t affinity; in kthread_affine_preferred() local 852 if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) in kthread_affine_preferred() 865 kthread_fetch_affinity(kthread, affinity); in kthread_affine_preferred() 868 set_cpus_allowed_force(p, affinity); in kthread_affine_preferred() 872 free_cpumask_var(affinity); in kthread_affine_preferred() [all …]
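
kthread.c above implements kthread_affine_preferred(), which records a preferred mask and applies it before the thread runs (falling back to node affinity if it cannot). A usage sketch; the worker function and thread name are placeholders:

    #include <linux/err.h>
    #include <linux/kthread.h>

    /* Sketch: create a kthread with a preferred CPU set, then start it. */
    static struct task_struct *spawn_affine(int (*fn)(void *), void *data,
                                            const struct cpumask *mask)
    {
            struct task_struct *t = kthread_create(fn, data, "sketch-worker");

            if (!IS_ERR(t)) {
                    /* Must be applied before the first wakeup. */
                    if (kthread_affine_preferred(t, mask))
                            pr_warn("sketch: preferred affinity rejected\n");
                    wake_up_process(t);
            }
            return t;
    }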
|
| /linux/drivers/infiniband/hw/hfi1/ |
| H A D | affinity.c | 955 struct hfi1_affinity_node_list *affinity) in find_hw_thread_mask() argument 960 cpumask_copy(hw_thread_mask, &affinity->proc.mask); in find_hw_thread_mask() 962 if (affinity->num_core_siblings == 0) in find_hw_thread_mask() 965 num_cores = rounddown(node_affinity.num_online_cpus / affinity->num_core_siblings, in find_hw_thread_mask() 983 struct hfi1_affinity_node_list *affinity = &node_affinity; in hfi1_get_proc_affinity() local 984 struct cpu_mask_set *set = &affinity->proc; in hfi1_get_proc_affinity() 1042 mutex_lock(&affinity->lock); in hfi1_get_proc_affinity() 1072 for (i = 0; i < affinity->num_core_siblings; i++) { in hfi1_get_proc_affinity() 1073 find_hw_thread_mask(i, hw_thread_mask, affinity); in hfi1_get_proc_affinity() 1143 mutex_unlock(&affinity->lock); in hfi1_get_proc_affinity() [all …]
|
| /linux/arch/arm64/kernel/ |
| H A D | setup.c | 113 u32 i, affinity, fs[4], bits[4], ls; in smp_build_mpidr_hash() local 127 affinity = MPIDR_AFFINITY_LEVEL(mask, i); in smp_build_mpidr_hash() 133 ls = fls(affinity); in smp_build_mpidr_hash() 134 fs[i] = affinity ? ffs(affinity) - 1 : 0; in smp_build_mpidr_hash()
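
smp_build_mpidr_hash() above compresses each sparse MPIDR affinity field down to its populated bit span, which is exactly an ffs()/fls() pair per level. The arithmetic on a single field, sketched with a worked example in the comment:

    #include <linux/bitops.h>
    #include <linux/types.h>

    /*
     * Sketch: span of populated bits in one affinity-level mask.
     * e.g. mask 0b1100: lowest set bit 2, highest set bit 3,
     * so shift = 2 and width = 2.
     */
    static void field_span(u32 mask, u32 *shift, u32 *width)
    {
            *shift = mask ? ffs(mask) - 1 : 0;      /* fs[i] above */
            *width = fls(mask) - *shift;            /* ls - fs[i] above */
    }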
|
| /linux/include/linux/ |
| H A D | interrupt.h | 112 * @affinity: CPUs this irqaction is allowed to run on 129 const struct cpumask *affinity; member 185 const cpumask_t *affinity, void __percpu *percpu_dev_id); 201 const struct cpumask *affinity, void __percpu *dev_id); in request_percpu_irq_affinity() 260 * struct irq_affinity_notify - context for notification of IRQ affinity changes 282 * struct irq_affinity - Description for automatic irq affinity assignments 283 * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of 285 * @post_vectors: Don't apply affinity to @post_vectors at end of 287 * @nr_sets: The number of interrupt sets for which affinity 305 * struct irq_affinity_desc - Interrupt affinity descriptor 202 request_percpu_irq_affinity(unsigned int irq,irq_handler_t handler,const char * devname,const cpumask_t * affinity,void __percpu * percpu_dev_id) request_percpu_irq_affinity() argument 424 irq_update_affinity_desc(unsigned int irq,struct irq_affinity_desc * affinity) irq_update_affinity_desc() argument [all …] |
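
Among the declarations above is request_percpu_irq_affinity(), the variant of request_percpu_irq() that also limits which CPUs may enable the per-CPU interrupt. A usage sketch; the handler, name, and per-CPU cookie are placeholders:

    #include <linux/interrupt.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(int, sketch_dev_id);      /* placeholder cookie */

    static irqreturn_t sketch_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    /* Sketch: per-CPU IRQ that may only be enabled on the CPUs in @mask. */
    static int setup_percpu_irq_sketch(unsigned int irq, const cpumask_t *mask)
    {
            return request_percpu_irq_affinity(irq, sketch_handler, "sketch",
                                               mask, &sketch_dev_id);
    }
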
| /linux/Documentation/core-api/irq/ |
| H A D | irq-affinity.rst | 2 SMP IRQ affinity 14 IRQ affinity then the value will not change from the default of all CPUs. 16 /proc/irq/default_smp_affinity specifies the default affinity mask that applies 17 to all non-active IRQs. Once an IRQ is allocated/activated, its affinity bitmask
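
Both procfs files above take a hexadecimal CPU bitmask. A small userspace sketch that pins an IRQ to one CPU by writing such a mask (requires root; CPUs above 31 would need a longer mask string):

    #include <stdio.h>

    /* Sketch: write a one-CPU hex mask to /proc/irq/<irq>/smp_affinity. */
    int pin_irq(unsigned int irq, unsigned int cpu)
    {
            char path[64];
            FILE *f;
            int ret;

            snprintf(path, sizeof(path), "/proc/irq/%u/smp_affinity", irq);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            ret = fprintf(f, "%x\n", 1u << cpu) < 0 ? -1 : 0; /* cpu < 32 */
            fclose(f);
            return ret;
    }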
|
| /linux/Documentation/translations/zh_CN/core-api/irq/ |
| H A D | irq-affinity.rst | 3 :Original: Documentation/core-api/irq/irq-affinity.rst 9 .. _cn_irq-affinity.rst: 23 (IRQ affinity), then the default value for all CPUs will remain unchanged (i.e. associated with all CPUs).
|
| /linux/arch/alpha/kernel/ |
| H A D | sys_dp264.c | 136 cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) in cpu_set_irq_affinity() argument 142 if (cpumask_test_cpu(cpu, &affinity)) in cpu_set_irq_affinity() 151 dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity, in dp264_set_affinity() argument 155 cpu_set_irq_affinity(d->irq, *affinity); in dp264_set_affinity() 163 clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity, in clipper_set_affinity() argument 167 cpu_set_irq_affinity(d->irq - 16, *affinity); in clipper_set_affinity()
|
| H A D | sys_titan.c | 135 titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) in titan_cpu_set_irq_affinity() argument 140 if (cpumask_test_cpu(cpu, &affinity)) in titan_cpu_set_irq_affinity() 149 titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, in titan_set_irq_affinity() argument 154 titan_cpu_set_irq_affinity(irq - 16, *affinity); in titan_set_irq_affinity()
|
| /linux/drivers/irqchip/ |
| H A D | irq-bcm7038-l1.c | 47 u8 affinity[MAX_WORDS * IRQS_PER_WORD]; member 173 __bcm7038_l1_unmask(d, intc->affinity[d->hwirq]); in bcm7038_l1_unmask() 183 __bcm7038_l1_mask(d, intc->affinity[d->hwirq]); in bcm7038_l1_mask() 202 was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] & in bcm7038_l1_set_affinity() 204 __bcm7038_l1_mask(d, intc->affinity[hw]); in bcm7038_l1_set_affinity() 205 intc->affinity[hw] = first_cpu; in bcm7038_l1_set_affinity()
|
| /linux/tools/perf/ |
| H A D | builtin-record.c | 100 struct mmap_cpu_mask affinity; member 1290 if (opts->affinity != PERF_AFFINITY_SYS) in record__mmap_evlist() 1296 opts->nr_cblocks, opts->affinity, in record__mmap_evlist() 1578 if (rec->opts.affinity != PERF_AFFINITY_SYS && in record__adjust_affinity() 1579 !bitmap_equal(thread->mask->affinity.bits, map->affinity_mask.bits, in record__adjust_affinity() 1580 thread->mask->affinity.nbits)) { in record__adjust_affinity() 1581 bitmap_zero(thread->mask->affinity.bits, thread->mask->affinity.nbits); in record__adjust_affinity() 1582 bitmap_or(thread->mask->affinity.bits, thread->mask->affinity.bits, in record__adjust_affinity() 1583 map->affinity_mask.bits, thread->mask->affinity.nbits); in record__adjust_affinity() 1584 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity), in record__adjust_affinity() [all …]
|
| /linux/tools/testing/selftests/rcutorture/bin/ |
| H A D | kvm-test-1-run-batch.sh | 65 print "echo No CPU-affinity information, so no taskset command."; 71 print "echo " scenario ": Bogus CPU-affinity information, so no taskset command.";
|
| /linux/drivers/resctrl/ |
| H A D | mpam_devices.c | 449 cpumask_t *affinity) in mpam_get_cpumask_from_cache_id() argument 451 return acpi_pptt_get_cpumask_from_cache_id(cache_id, affinity); in mpam_get_cpumask_from_cache_id() 458 static void get_cpumask_from_node_id(u32 node_id, cpumask_t *affinity) in get_cpumask_from_node_id() argument 464 cpumask_set_cpu(cpu, affinity); in get_cpumask_from_node_id() 468 static int mpam_ris_get_affinity(struct mpam_msc *msc, cpumask_t *affinity, in mpam_ris_get_affinity() argument 478 affinity); in mpam_ris_get_affinity() 485 if (cpumask_empty(affinity)) in mpam_ris_get_affinity() 490 get_cpumask_from_node_id(comp->comp_id, affinity); in mpam_ris_get_affinity() 497 cpumask_and(affinity, affinity, &msc->accessibility); in mpam_ris_get_affinity() 545 err = mpam_ris_get_affinity(msc, &ris->affinity, type, class, comp); in mpam_ris_create_locked() [all …]
|