
Searched refs:sibling (Results 1 – 25 of 219) sorted by relevance

/linux/tools/lib/
rbtree.c
230 struct rb_node *node = NULL, *sibling, *tmp1, *tmp2; in ____rb_erase_color() local
240 sibling = parent->rb_right; in ____rb_erase_color()
241 if (node != sibling) { /* node == parent->rb_left */ in ____rb_erase_color()
242 if (rb_is_red(sibling)) { in ____rb_erase_color()
252 tmp1 = sibling->rb_left; in ____rb_erase_color()
254 WRITE_ONCE(sibling->rb_left, parent); in ____rb_erase_color()
256 __rb_rotate_set_parents(parent, sibling, root, in ____rb_erase_color()
258 augment_rotate(parent, sibling); in ____rb_erase_color()
259 sibling = tmp1; in ____rb_erase_color()
261 tmp1 = sibling->rb_right; in ____rb_erase_color()
[all …]
/linux/lib/
rbtree.c
230 struct rb_node *node = NULL, *sibling, *tmp1, *tmp2; in ____rb_erase_color() local
240 sibling = parent->rb_right; in ____rb_erase_color()
241 if (node != sibling) { /* node == parent->rb_left */ in ____rb_erase_color()
242 if (rb_is_red(sibling)) { in ____rb_erase_color()
252 tmp1 = sibling->rb_left; in ____rb_erase_color()
254 WRITE_ONCE(sibling->rb_left, parent); in ____rb_erase_color()
256 __rb_rotate_set_parents(parent, sibling, root, in ____rb_erase_color()
258 augment_rotate(parent, sibling); in ____rb_erase_color()
259 sibling = tmp1; in ____rb_erase_color()
261 tmp1 = sibling->rb_right; in ____rb_erase_color()
[all …]
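The identical matches above (lib/rbtree.c and its copy under tools/lib/) show ____rb_erase_color() locating the sibling of the node being rebalanced after an erase: when the node is the parent's left child the sibling is parent->rb_right, and a red sibling forces a rotation toward the parent. A minimal sketch of that sibling lookup, with made-up names (rb_node_t, rb_sibling) rather than the kernel's rbtree API:

typedef struct rb_node_t {
    struct rb_node_t *parent;
    struct rb_node_t *left;
    struct rb_node_t *right;
    int red;                 /* the kernel packs the colour into __rb_parent_color */
} rb_node_t;

/* Sibling of @node under @parent, as in the "node != sibling" test above:
 * if node is the left child the sibling is the right child, and vice versa. */
static rb_node_t *rb_sibling(const rb_node_t *node, const rb_node_t *parent)
{
    return (node == parent->left) ? parent->right : parent->left;
}

/* NULL leaves count as black, so only a non-NULL red sibling triggers the
 * rotation case quoted above. */
static int rb_is_red_node(const rb_node_t *n)
{
    return n && n->red;
}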
/linux/kernel/
resource.c
64 while (!p->sibling && p->parent) { in next_resource()
69 return p->sibling; in next_resource()
197 new->sibling = tmp; in __request_resource()
202 p = &tmp->sibling; in __request_resource()
220 *p = tmp->sibling; in __release_resource()
222 for (chd = tmp->child;; chd = chd->sibling) { in __release_resource()
224 if (!(chd->sibling)) in __release_resource()
228 chd->sibling = tmp->sibling; in __release_resource()
233 p = &tmp->sibling; in __release_resource()
247 p = p->sibling; in __release_child_resources()
[all …]
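The resource.c hits above rely on struct resource's tree being linked by parent/child/sibling pointers: next_resource() climbs back up until it finds a node with an unvisited sibling, while __request_resource() and __release_resource() splice entries in and out of a sibling chain. A small self-contained sketch of that traversal (struct res_node and res_next are illustrative names, not the kernel API):

#include <stdio.h>

/* Hypothetical stand-in for struct resource's tree links. */
struct res_node {
    const char *name;
    struct res_node *parent;
    struct res_node *sibling;   /* next entry on the same level */
    struct res_node *child;     /* first entry one level down   */
};

/*
 * Depth-first successor in the style of next_resource(): descend into the
 * first child if there is one, otherwise climb up until a node with an
 * unvisited sibling is found (the loop quoted above), then step sideways.
 */
static struct res_node *res_next(struct res_node *p)
{
    if (p->child)
        return p->child;
    while (!p->sibling && p->parent)
        p = p->parent;
    return p->sibling;
}

int main(void)
{
    struct res_node root = { .name = "root" };
    struct res_node a    = { .name = "a",  .parent = &root };
    struct res_node a1   = { .name = "a1", .parent = &a };
    struct res_node b    = { .name = "b",  .parent = &root };

    root.child = &a;
    a.child    = &a1;
    a.sibling  = &b;

    for (struct res_node *p = &root; p; p = res_next(p))
        printf("%s\n", p->name);    /* root, a, a1, b */

    return 0;
}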
/linux/arch/s390/kernel/
guarded_storage.c
97 struct task_struct *sibling; in gs_broadcast() local
100 for_each_thread(current, sibling) { in gs_broadcast()
101 if (!sibling->thread.gs_bc_cb) in gs_broadcast()
103 if (test_and_set_tsk_thread_flag(sibling, TIF_GUARDED_STORAGE)) in gs_broadcast()
104 kick_process(sibling); in gs_broadcast()
Makefile
35 CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
36 CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
37 CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
/linux/drivers/media/platform/amphion/
vpu_helpers.c
66 const struct vpu_format *sibling; in vpu_helper_find_sibling() local
69 if (!fmt || !fmt->sibling) in vpu_helper_find_sibling()
72 sibling = vpu_helper_find_format(inst, type, fmt->sibling); in vpu_helper_find_sibling()
73 if (!sibling || sibling->sibling != fmt->pixfmt || in vpu_helper_find_sibling()
74 sibling->comp_planes != fmt->comp_planes) in vpu_helper_find_sibling()
77 return sibling; in vpu_helper_find_sibling()
82 const struct vpu_format *sibling; in vpu_helper_match_format() local
87 sibling = vpu_helper_find_sibling(inst, type, fmta); in vpu_helper_match_format()
88 if (sibling && sibling->pixfmt == fmtb) in vpu_helper_match_format()
/linux/arch/x86/kernel/
smpboot.c
1140 int sibling; in remove_siblinginfo() local
1143 for_each_cpu(sibling, topology_core_cpumask(cpu)) { in remove_siblinginfo()
1144 cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); in remove_siblinginfo()
1149 cpu_data(sibling).booted_cores--; in remove_siblinginfo()
1152 for_each_cpu(sibling, topology_die_cpumask(cpu)) in remove_siblinginfo()
1153 cpumask_clear_cpu(cpu, topology_die_cpumask(sibling)); in remove_siblinginfo()
1155 for_each_cpu(sibling, topology_sibling_cpumask(cpu)) { in remove_siblinginfo()
1156 cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); in remove_siblinginfo()
1157 if (cpumask_weight(topology_sibling_cpumask(sibling)) == 1) in remove_siblinginfo()
1158 cpu_data(sibling).smt_active = false; in remove_siblinginfo()
[all …]
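remove_siblinginfo() above (like remove_cpu_topology() further down in drivers/base/arch_topology.c) keeps the topology masks symmetric: when a CPU goes away it must be cleared from every sibling's mask, and a sibling left alone in its own mask no longer has SMT active. A toy sketch with boolean arrays standing in for cpumask_t (NR_CPUS, sibling_mask, smt_active are made up here):

#include <stdbool.h>

#define NR_CPUS 8

/*
 * Hypothetical per-CPU SMT sibling masks standing in for cpumask_t /
 * topology_sibling_cpumask(): sibling_mask[a][b] means b is in a's mask,
 * and (as in the kernel) a CPU's mask contains the CPU itself.
 */
static bool sibling_mask[NR_CPUS][NR_CPUS];
static bool smt_active[NR_CPUS];

static int mask_weight(int cpu)
{
    int w = 0;

    for (int i = 0; i < NR_CPUS; i++)
        w += sibling_mask[cpu][i];
    return w;
}

/*
 * Mirror of the sibling loop in remove_siblinginfo()/remove_cpu_topology():
 * on hot-unplug, drop @cpu from the mask of every CPU that lists it, and if
 * a sibling ends up alone in its own mask, SMT is no longer active for it.
 * (Clearing @cpu's own masks afterwards is left out of this sketch.)
 */
static void remove_sibling_info(int cpu)
{
    for (int sibling = 0; sibling < NR_CPUS; sibling++) {
        if (!sibling_mask[cpu][sibling])
            continue;
        sibling_mask[sibling][cpu] = false;
        if (mask_weight(sibling) == 1)
            smt_active[sibling] = false;
    }
}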
/linux/arch/sparc/kernel/
pci_psycho.c
191 if (pbm->sibling) in psycho_ue_intr()
192 psycho_check_iommu_error(pbm->sibling, afsr, afar, UE_ERR); in psycho_ue_intr()
528 pbm->sibling = psycho_find_sibling(upa_portid); in psycho_probe()
529 if (pbm->sibling) { in psycho_probe()
530 iommu = pbm->sibling->iommu; in psycho_probe()
565 if (!pbm->sibling) { in psycho_probe()
581 if (pbm->sibling) in psycho_probe()
582 pbm->sibling->sibling = pbm; in psycho_probe()
589 if (!pbm->sibling) in psycho_probe()
/linux/drivers/base/
cacheinfo.c
455 unsigned int sibling, index, sib_index; in cache_shared_cpu_map_remove() local
459 for_each_cpu(sibling, &this_leaf->shared_cpu_map) { in cache_shared_cpu_map_remove()
460 if (sibling == cpu || !per_cpu_cacheinfo(sibling)) in cache_shared_cpu_map_remove()
463 for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) { in cache_shared_cpu_map_remove()
464 sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index); in cache_shared_cpu_map_remove()
477 cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); in cache_shared_cpu_map_remove()
937 unsigned int sibling; in cpu_map_shared_cache() local
953 for_each_cpu(sibling, &llc->shared_cpu_map) { in cpu_map_shared_cache()
954 if (sibling == cpu || !last_level_cache_is_valid(sibling)) in cpu_map_shared_cache()
956 sib_llc = per_cpu_cacheinfo_idx(sibling, cache_leaves(sibling) - 1); in cpu_map_shared_cache()
arch_topology.c
812 int sibling; in remove_cpu_topology() local
814 for_each_cpu(sibling, topology_core_cpumask(cpu)) in remove_cpu_topology()
815 cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); in remove_cpu_topology()
816 for_each_cpu(sibling, topology_sibling_cpumask(cpu)) in remove_cpu_topology()
817 cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); in remove_cpu_topology()
818 for_each_cpu(sibling, topology_cluster_cpumask(cpu)) in remove_cpu_topology()
819 cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling)); in remove_cpu_topology()
820 for_each_cpu(sibling, topology_llc_cpumask(cpu)) in remove_cpu_topology()
821 cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); in remove_cpu_topology()
/linux/tools/testing/selftests/seccomp/
seccomp_bpf.c
2558 struct tsync_sibling sibling[TSYNC_SIBLINGS]; in FIXTURE() local
2580 memset(&self->sibling, 0, sizeof(self->sibling)); in FIXTURE_SETUP()
2595 self->sibling[0].tid = 0; in FIXTURE_SETUP()
2596 self->sibling[0].cond = &self->cond; in FIXTURE_SETUP()
2597 self->sibling[0].started = &self->started; in FIXTURE_SETUP()
2598 self->sibling[0].mutex = &self->mutex; in FIXTURE_SETUP()
2599 self->sibling[0].diverge = 0; in FIXTURE_SETUP()
2600 self->sibling[0].num_waits = 1; in FIXTURE_SETUP()
2601 self->sibling[0].prog = &self->root_prog; in FIXTURE_SETUP()
2602 self->sibling[0].metadata = _metadata; in FIXTURE_SETUP()
[all …]
/linux/drivers/gpu/drm/i915/gt/
intel_execlists_submission.c
3631 struct intel_engine_cs *sibling = ve->siblings[n]; in rcu_virtual_context_destroy() local
3632 struct rb_node *node = &ve->nodes[sibling->id].rb; in rcu_virtual_context_destroy()
3637 spin_lock_irq(&sibling->sched_engine->lock); in rcu_virtual_context_destroy()
3641 rb_erase_cached(node, &sibling->execlists.virtual); in rcu_virtual_context_destroy()
3643 spin_unlock_irq(&sibling->sched_engine->lock); in rcu_virtual_context_destroy()
3750 virtual_get_sibling(struct intel_engine_cs *engine, unsigned int sibling) in virtual_get_sibling() argument
3754 if (sibling >= ve->num_siblings) in virtual_get_sibling()
3757 return ve->siblings[sibling]; in virtual_get_sibling()
3821 struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]); in virtual_submission_tasklet() local
3822 struct ve_node * const node = &ve->nodes[sibling->id]; in virtual_submission_tasklet()
[all …]
/linux/drivers/powercap/
dtpm.c
72 list_for_each_entry(child, &dtpm->children, sibling) { in __get_power_uw()
91 list_for_each_entry(child, &dtpm->children, sibling) { in __dtpm_rebalance_weight()
178 list_del(&dtpm->sibling); in dtpm_release_zone()
231 list_for_each_entry(child, &dtpm->children, sibling) { in __set_power_limit_uw()
323 INIT_LIST_HEAD(&dtpm->sibling); in dtpm_init()
398 list_add_tail(&dtpm->sibling, &parent->children); in dtpm_register()
622 list_for_each_entry_safe(child, aux, &dtpm->children, sibling) in __dtpm_destroy_hierarchy()
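The dtpm.c matches above walk a power-domain hierarchy whose children are chained on a list_head member named sibling; __get_power_uw() sums the children's power. A simplified sketch using plain first_child/next_sibling pointers instead of the kernel's list_head (struct pwr_node and get_power_uw are illustrative names):

#include <stdint.h>
#include <stdio.h>

/*
 * Simplified stand-in for struct dtpm: the kernel chains children on a
 * list_head member called "sibling"; plain pointers are used here instead.
 */
struct pwr_node {
    uint64_t power_uw;             /* leaves report their own power       */
    struct pwr_node *first_child;  /* non-leaves aggregate their children */
    struct pwr_node *next_sibling;
};

/* Like __get_power_uw(): a parent's power is the sum over its children. */
static uint64_t get_power_uw(const struct pwr_node *node)
{
    uint64_t sum = 0;

    if (!node->first_child)
        return node->power_uw;

    for (const struct pwr_node *child = node->first_child;
         child; child = child->next_sibling)
        sum += get_power_uw(child);
    return sum;
}

int main(void)
{
    struct pwr_node cpu = { .power_uw = 1500 };
    struct pwr_node gpu = { .power_uw = 3000, .next_sibling = &cpu };
    struct pwr_node soc = { .first_child = &gpu };

    printf("%llu uW\n", (unsigned long long)get_power_uw(&soc));  /* 4500 uW */
    return 0;
}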
/linux/Documentation/devicetree/
of_unittest.rst
91 struct device_node *sibling;
96 considering only child and sibling pointers. There exists another pointer,
98 a particular level the child node and all the sibling nodes will have a parent
161 replaces the current child and turns it into its sibling. So, when the testcase
202 sibling compared to the earlier structure (Figure 2). After attaching first
204 (i.e. test-child0) to become a sibling and makes itself a child node,
223 node's parent to its sibling or attaches the previous sibling to the given
224 node's sibling, as appropriate. That is it :)
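The of_unittest.rst excerpt above describes how device-tree nodes hang together: each level is a singly linked chain of sibling pointers, the parent's child pointer names the head of that chain, attaching a node makes the previous child its sibling, and detaching either advances the parent's child pointer or links the previous sibling past the node. A minimal sketch of those two operations (struct dn, dn_attach, dn_detach are made-up names, not the of_* API):

#include <stddef.h>

/* Hypothetical minimal node shape matching the of_unittest.rst excerpt. */
struct dn {
    struct dn *parent;
    struct dn *child;     /* first child                  */
    struct dn *sibling;   /* next node at the same level  */
};

/*
 * Attach: the new node becomes the parent's first child and the previous
 * first child becomes the new node's sibling, i.e. the "replaces the current
 * child and turns it into its sibling" behaviour described above.
 */
static void dn_attach(struct dn *parent, struct dn *node)
{
    node->parent  = parent;
    node->sibling = parent->child;
    parent->child = node;
}

/*
 * Detach: either move the parent's child pointer to the node's sibling, or
 * link the previous sibling past the node, as the last lines above put it.
 */
static void dn_detach(struct dn *node)
{
    struct dn *parent = node->parent;

    if (parent->child == node) {
        parent->child = node->sibling;
    } else {
        struct dn *prev = parent->child;

        while (prev->sibling != node)
            prev = prev->sibling;
        prev->sibling = node->sibling;
    }
    node->parent  = NULL;
    node->sibling = NULL;
}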
/linux/kernel/sched/
topology.c
915 struct sched_domain *sibling; in build_balance_mask() local
921 sibling = *per_cpu_ptr(sdd->sd, i); in build_balance_mask()
928 if (!sibling->child) in build_balance_mask()
932 if (!cpumask_equal(sg_span, sched_domain_span(sibling->child))) in build_balance_mask()
1000 find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling) in find_descended_sibling() argument
1006 while (sibling->child && in find_descended_sibling()
1007 !cpumask_subset(sched_domain_span(sibling->child), in find_descended_sibling()
1009 sibling = sibling->child; in find_descended_sibling()
1016 while (sibling->child && in find_descended_sibling()
1017 cpumask_equal(sched_domain_span(sibling->child), in find_descended_sibling()
[all …]
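The sched/topology.c hits show balance-mask construction descending through a sibling domain's children by comparing CPU spans. Under the assumption that a span can be modelled as a bitmask, a rough sketch of the two descent loops visible in find_descended_sibling() (struct dom and span_subset are invented for the sketch, and the truncated operand of the cpumask_subset() call above is assumed to be sd's span):

#include <stdint.h>

/* Hypothetical stripped-down sched_domain: a CPU span as a bitmask plus a child. */
struct dom {
    uint64_t span;            /* bit i set => CPU i belongs to this domain */
    struct dom *child;
};

static int span_subset(uint64_t a, uint64_t b)    /* is a contained in b? */
{
    return (a & ~b) == 0;
}

/*
 * Shape of find_descended_sibling(): first descend while the sibling's child
 * still reaches CPUs outside @sd, then skip levels whose span is identical to
 * their child's, since those contribute nothing extra.
 */
static struct dom *find_descended_sibling(struct dom *sd, struct dom *sibling)
{
    while (sibling->child && !span_subset(sibling->child->span, sd->span))
        sibling = sibling->child;

    while (sibling->child && sibling->child->span == sibling->span)
        sibling = sibling->child;

    return sibling;
}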
/linux/drivers/pci/hotplug/
acpiphp_glue.c
161 list_for_each_entry_safe(func, tmp, &slot->funcs, sibling) in free_bridge()
330 list_add_tail(&newfunc->sibling, &slot->funcs); in acpiphp_add_context()
345 list_for_each_entry(func, &slot->funcs, sibling) { in cleanup_bridge()
398 list_for_each_entry(func, &slot->funcs, sibling) { in acpiphp_set_acpi_region()
422 list_for_each_entry(func, &slot->funcs, sibling) { in check_hotplug_bridge()
434 list_for_each_entry(func, &slot->funcs, sibling) { in acpiphp_rescan_slot()
536 list_for_each_entry(func, &slot->funcs, sibling) { in enable_slot()
569 list_for_each_entry(func, &slot->funcs, sibling) in disable_slot()
605 list_for_each_entry(func, &slot->funcs, sibling) { in get_slot_status()
1014 list_for_each_entry(func, &slot->funcs, sibling) in acpiphp_disable_and_eject_slot()
/linux/Documentation/admin-guide/hw-vuln/
cross-thread-rsb.rst
8 predictions vulnerability. When running in SMT mode and one sibling thread
9 transitions out of C0 state, the other sibling thread could use return target
10 predictions from the sibling thread that transitioned out of C0.
16 being consumed by the sibling thread.
52 used by RET predictions in the sibling thread following a 1T/2T switch. In
core-scheduling.rst
105 During a schedule() event on any sibling of a core, the highest priority task on
106 the sibling's core is picked and assigned to the sibling calling schedule(), if
107 the sibling has the task enqueued. For rest of the siblings in the core,
114 switch to the new task immediately. If an idle task is selected for a sibling,
115 then the sibling is considered to be in a `forced idle` state. I.e., it may
127 task. If a sibling does not have a trusted task to run, it will be forced idle
131 the sibling to force it into idle. This results in 4 cases which need to be
189 sibling. Such attacks are possible for any combination of sibling CPU modes
212 sibling hyperthreads from one another. Prototypes of mitigations have been posted
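The core-scheduling.rst lines above describe the selection rule in prose: on a schedule() event the highest-priority task on the whole core wins, and every sibling then runs either a task sharing that task's cookie or is forced idle. A deliberately toy illustration of that rule (struct task, struct rq and pick_for_core are invented here; real runqueues, vruntime comparison and the priority-inversion handling are all omitted):

#include <stddef.h>

#define NR_SIBLINGS 2
#define MAX_TASKS   4

struct task {
    const char   *name;
    int           prio;     /* toy rule: higher value wins                            */
    unsigned long cookie;   /* tasks sharing a cookie trust each other; 0 = untagged  */
};

struct rq {
    struct task *tasks[MAX_TASKS];   /* one tiny "runqueue" per sibling */
    int          nr;
};

/* Highest-priority task on one sibling, optionally restricted to a cookie. */
static struct task *pick_highest(const struct rq *rq, const unsigned long *cookie)
{
    struct task *best = NULL;

    for (int i = 0; i < rq->nr; i++) {
        struct task *t = rq->tasks[i];

        if (cookie && t->cookie != *cookie)
            continue;
        if (!best || t->prio > best->prio)
            best = t;
    }
    return best;
}

/*
 * Core-wide pick in the spirit of the text above: the single highest-priority
 * runnable task fixes the cookie for the whole core, and each sibling either
 * runs one of its own tasks with that cookie or is forced idle (NULL).
 */
static void pick_for_core(const struct rq rqs[NR_SIBLINGS],
                          struct task *picks[NR_SIBLINGS])
{
    struct task *core_pick = NULL;

    for (int cpu = 0; cpu < NR_SIBLINGS; cpu++) {
        struct task *t = pick_highest(&rqs[cpu], NULL);

        if (t && (!core_pick || t->prio > core_pick->prio))
            core_pick = t;
    }

    for (int cpu = 0; cpu < NR_SIBLINGS; cpu++)
        picks[cpu] = core_pick ? pick_highest(&rqs[cpu], &core_pick->cookie)
                               : NULL;
}

A sibling whose queue holds no task tagged with the winning cookie ends up with NULL here, i.e. the "forced idle" state the document describes.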
/linux/drivers/sh/clk/
core.c
217 list_del_init(&child->sibling); in clk_reparent()
219 list_add(&child->sibling, &parent->children); in clk_reparent()
230 list_for_each_entry(clkp, &tclk->children, sibling) { in propagate_rate()
321 list_for_each_entry(clkp, &root_clks, sibling) { in recalculate_root_clocks()
434 list_add(&clk->sibling, &clk->parent->children); in clk_register()
436 list_add(&clk->sibling, &root_clks); in clk_register()
455 list_del(&clk->sibling); in clk_unregister()
/linux/drivers/perf/hisilicon/
hisi_pcie_pmu.c
336 struct perf_event *sibling, *leader = event->group_leader; in hisi_pcie_pmu_validate_event_group() local
350 for_each_sibling_event(sibling, event->group_leader) { in hisi_pcie_pmu_validate_event_group()
351 if (is_software_event(sibling)) in hisi_pcie_pmu_validate_event_group()
354 if (sibling->pmu != event->pmu) in hisi_pcie_pmu_validate_event_group()
362 if (hisi_pcie_pmu_cmp_event(event_group[num], sibling)) in hisi_pcie_pmu_validate_event_group()
375 event_group[counters++] = sibling; in hisi_pcie_pmu_validate_event_group()
426 struct perf_event *sibling; in hisi_pcie_pmu_get_event_idx() local
430 sibling = pcie_pmu->hw_events[idx]; in hisi_pcie_pmu_get_event_idx()
431 if (!sibling) { in hisi_pcie_pmu_get_event_idx()
438 if (hisi_pcie_pmu_cmp_event(sibling, event) && in hisi_pcie_pmu_get_event_idx()
[all …]
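hisi_pcie_pmu_validate_event_group() above (like the qcom_l2 and dwc_pcie PMUs that follow) walks the leader's sibling events to decide whether the whole group can ever be scheduled: software events are ignored, events owned by another PMU are rejected, and the remaining events must fit in the hardware counters. A simplified sketch of that check (NR_COUNTERS, struct pmu_event and validate_group are invented; the kernel's extra step of merging siblings that can share a counter is left out):

#include <stdbool.h>
#include <stddef.h>

#define NR_COUNTERS 4   /* hypothetical number of hardware counters */

/* Hypothetical flattened perf_event: a group leader plus an array of siblings. */
struct pmu_event {
    const void *pmu;       /* identifies the owning PMU              */
    bool        software;  /* software events don't need a counter   */
};

/*
 * Group validation in the style of hisi_pcie_pmu_validate_event_group():
 * walk the leader's siblings, ignore software events, reject events from a
 * different PMU, and make sure what remains fits in the counters.
 */
static bool validate_group(const struct pmu_event *leader,
                           const struct pmu_event *siblings, size_t nr_siblings)
{
    size_t used = 1;   /* the leader itself occupies one counter */

    for (size_t i = 0; i < nr_siblings; i++) {
        const struct pmu_event *sibling = &siblings[i];

        if (sibling->software)
            continue;            /* scheduled by the core, not by this PMU  */
        if (sibling->pmu != leader->pmu)
            return false;        /* mixed-PMU groups cannot be co-scheduled */
        if (++used > NR_COUNTERS)
            return false;        /* group would never fit on the hardware   */
    }
    return true;
}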
/linux/drivers/perf/
qcom_l2_pmu.c
442 struct perf_event *sibling; in l2_cache_event_init() local
479 for_each_sibling_event(sibling, event->group_leader) { in l2_cache_event_init()
480 if (sibling->pmu != event->pmu && in l2_cache_event_init()
481 !is_software_event(sibling)) { in l2_cache_event_init()
516 for_each_sibling_event(sibling, event->group_leader) { in l2_cache_event_init()
517 if ((sibling != event) && in l2_cache_event_init()
518 !is_software_event(sibling) && in l2_cache_event_init()
519 (L2_EVT_GROUP(sibling->attr.config) == in l2_cache_event_init()
523 sibling->attr.config, in l2_cache_event_init()
dwc_pcie_pmu.c
403 struct perf_event *sibling, *leader = event->group_leader; in dwc_pcie_pmu_validate_group() local
415 for_each_sibling_event(sibling, leader) { in dwc_pcie_pmu_validate_group()
416 type = DWC_PCIE_EVENT_TYPE(sibling); in dwc_pcie_pmu_validate_group()
425 if (dwc_pcie_pmu_validate_add_lane_event(sibling, val_lane_events)) in dwc_pcie_pmu_validate_group()
436 struct perf_event *sibling; in dwc_pcie_pmu_event_init() local
450 for_each_sibling_event(sibling, event->group_leader) { in dwc_pcie_pmu_event_init()
451 if (sibling->pmu != event->pmu && !is_software_event(sibling)) in dwc_pcie_pmu_event_init()
/linux/net/netfilter/
nf_conntrack_pptp.c
133 struct nf_conn *sibling; in destroy_sibling_or_exp() local
141 sibling = nf_ct_tuplehash_to_ctrack(h); in destroy_sibling_or_exp()
142 pr_debug("setting timeout of conntrack %p to 0\n", sibling); in destroy_sibling_or_exp()
143 sibling->proto.gre.timeout = 0; in destroy_sibling_or_exp()
144 sibling->proto.gre.stream_timeout = 0; in destroy_sibling_or_exp()
145 nf_ct_kill(sibling); in destroy_sibling_or_exp()
146 nf_ct_put(sibling); in destroy_sibling_or_exp()
/linux/arch/powerpc/platforms/85xx/
smp.c
390 int sibling = cpu_last_thread_sibling(cpu); in mpc85xx_smp_kexec_cpu_down() local
410 } else if (sibling == crashing_cpu) { in mpc85xx_smp_kexec_cpu_down()
414 if (cpu_thread_in_core(cpu) == 0 && cpu_thread_in_core(sibling) != 0) { in mpc85xx_smp_kexec_cpu_down()
416 disable_cpu = sibling; in mpc85xx_smp_kexec_cpu_down()
/linux/arch/x86/kernel/cpu/
cacheinfo.c
493 int i, sibling; in __cache_amd_cpumap_setup() local
506 for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) { in __cache_amd_cpumap_setup()
507 if (!cpu_online(sibling)) in __cache_amd_cpumap_setup()
509 cpumask_set_cpu(sibling, &ci->shared_cpu_map); in __cache_amd_cpumap_setup()
531 for_each_online_cpu(sibling) { in __cache_amd_cpumap_setup()
532 apicid = cpu_data(sibling).topo.apicid; in __cache_amd_cpumap_setup()
535 cpumask_set_cpu(sibling, &ci->shared_cpu_map); in __cache_amd_cpumap_setup()
