Home
last modified time | relevance | path

Searched full:siblings (Results 1 – 25 of 213) sorted by relevance

1 2 3 4 5 6 7 8 9

/linux-6.15/tools/testing/selftests/cgroup/
Dmemcg_protection.m6 % hierarchy to illustrate how overcommitted protection spreads among siblings
9 % Simulation assumes siblings consumed the initial amount of memory (w/out
16 % n vector nominal protection of siblings set at the given level (memory.low)
48 siblings = sum(u); variable
52 e = protected * min(1, E / siblings); % normalize overcommit
55 unclaimed = max(0, E - siblings);
56 parent_overuse = sum(c) - siblings;
79 % XXX here I do parallel reclaim of all siblings
/linux-6.15/Documentation/admin-guide/hw-vuln/
Dcore-scheduling.rst100 siblings of a core such that all the selected tasks running on a core are
107 the sibling has the task enqueued. For rest of the siblings in the core,
112 Once a task has been selected for all the siblings in the core, an IPI is sent to
113 siblings for whom a new task was selected. Siblings on receiving the IPI will
125 siblings could be forced to select a lower priority task if the highest
157 and are considered system-wide trusted. The forced-idling of siblings running
174 the siblings to switch to the new task. But there could be hardware delays in
176 cause an attacker task to start running on a CPU before its siblings receive the
177 IPI. Even though cache is flushed on entry to user mode, victim tasks on siblings
185 Core scheduling cannot protect against MDS attacks between the siblings
[all …]
/linux-6.15/drivers/infiniband/hw/irdma/
Dws.c133 list_for_each_entry(node, &parent->child_list_head, siblings) { in ws_find_node()
139 list_for_each_entry(node, &parent->child_list_head, siblings) { in ws_find_node()
214 list_del(&tc_node->siblings); in irdma_remove_leaf()
219 list_del(&vsi_node->siblings); in irdma_remove_leaf()
295 list_add(&vsi_node->siblings, &ws_tree_root->child_list_head); in irdma_ws_add()
322 list_add(&tc_node->siblings, &vsi_node->child_list_head); in irdma_ws_add()
356 list_del(&tc_node->siblings); in irdma_ws_add()
362 list_del(&vsi_node->siblings); in irdma_ws_add()
Dws.h19 struct list_head siblings; member
/linux-6.15/drivers/gpu/drm/i915/gt/uc/
Dselftest_guc_multi_lrc.c34 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; in multi_lrc_create_parent() local
43 siblings[i++] = engine; in multi_lrc_create_parent()
49 logical_sort(siblings, i); in multi_lrc_create_parent()
51 return intel_engine_create_parallel(siblings, 1, i); in multi_lrc_create_parent()
/linux-6.15/arch/x86/kernel/cpu/microcode/
Dcore.c257 * - HT siblings must be idle and not execute other code while the other sibling
412 * If the update was successful, let the siblings run the apply() in __load_primary()
461 /* Kick soft-offlined SMT siblings if required */ in load_primary()
469 /* Unconditionally release soft-offlined SMT siblings if required */ in load_primary()
544 unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0; in load_late_stop_cpus() local
577 case UCODE_OK: siblings++; break; in load_late_stop_cpus()
593 pr_warn("%u offline siblings did not respond.\n", in load_late_stop_cpus()
605 pr_info("load: updated on %u primary CPUs with %u siblings\n", updated, siblings); in load_late_stop_cpus()
608 num_online_cpus() - (updated + siblings)); in load_late_stop_cpus()
613 return updated + siblings == num_online_cpus() ? 0 : -EIO; in load_late_stop_cpus()
/linux-6.15/drivers/gpio/
Dgpio-sim.c594 struct list_head siblings; member
628 struct list_head siblings; member
724 list_for_each_entry(line, &bank->line_list, siblings) { in gpio_sim_get_line_names_size()
739 list_for_each_entry(line, &bank->line_list, siblings) { in gpio_sim_set_line_names()
772 list_for_each_entry(bank, &dev->bank_list, siblings) { in gpio_sim_add_hogs()
773 list_for_each_entry(line, &bank->line_list, siblings) { in gpio_sim_add_hogs()
790 list_for_each_entry(bank, &dev->bank_list, siblings) { in gpio_sim_add_hogs()
791 list_for_each_entry(line, &bank->line_list, siblings) { in gpio_sim_add_hogs()
890 list_for_each_entry(this, &dev->bank_list, siblings) { in gpio_sim_bank_labels_non_unique()
891 list_for_each_entry(pos, &dev->bank_list, siblings) { in gpio_sim_bank_labels_non_unique()
[all …]
Dgpio-virtuser.c1016 struct list_head siblings; member
1035 struct list_head siblings; member
1378 list_for_each_entry(lookup, &dev->lookup_list, siblings) in gpio_virtuser_get_lookup_count()
1403 list_for_each_entry(lookup, &dev->lookup_list, siblings) { in gpio_virtuser_make_lookup_table()
1405 list_for_each_entry(entry, &lookup->entry_list, siblings) { in gpio_virtuser_make_lookup_table()
1444 list_for_each_entry(lookup, &dev->lookup_list, siblings) in gpio_virtuser_make_device_swnode()
1517 list_for_each_entry(lookup, &dev->lookup_list, siblings) { in gpio_virtuser_device_lockup_configfs()
1518 list_for_each_entry(entry, &lookup->entry_list, siblings) { in gpio_virtuser_device_lockup_configfs()
1580 list_del(&entry->siblings); in gpio_virtuser_lookup_entry_config_group_release()
1620 list_add_tail(&entry->siblings, &lookup->entry_list); in gpio_virtuser_make_lookup_entry_group()
[all …]
/linux-6.15/drivers/gpu/drm/i915/gem/
Di915_gem_context.c206 kfree(pc->user_engines[i].siblings); in proto_context_close()
407 struct intel_engine_cs **siblings; in set_proto_ctx_engines_balance() local
445 siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL); in set_proto_ctx_engines_balance()
446 if (!siblings) in set_proto_ctx_engines_balance()
457 siblings[n] = intel_engine_lookup_user(i915, in set_proto_ctx_engines_balance()
460 if (!siblings[n]) { in set_proto_ctx_engines_balance()
471 set->engines[idx].engine = siblings[0]; in set_proto_ctx_engines_balance()
472 kfree(siblings); in set_proto_ctx_engines_balance()
476 set->engines[idx].siblings = siblings; in set_proto_ctx_engines_balance()
482 kfree(siblings); in set_proto_ctx_engines_balance()
[all …]
Di915_gem_context_types.h102 * i915_gem_proto_engine::num_siblings and i915_gem_proto_engine::siblings.
106 * i915_gem_proto_engine::siblings.
115 /** @num_siblings: Number of balanced or parallel siblings */
121 /** @siblings: Balanced siblings or num_siblings * width for parallel */
122 struct intel_engine_cs **siblings; member
/linux-6.15/drivers/gpu/drm/i915/gt/
Dselftest_execlists.c3712 struct intel_engine_cs **siblings, in nop_virtual_engine() argument
3729 ve[n] = intel_engine_create_virtual(siblings, nsibling, 0); in nop_virtual_engine()
3840 struct intel_engine_cs **siblings, in __select_siblings() argument
3853 siblings[n++] = gt->engine_class[class][inst]; in __select_siblings()
3862 struct intel_engine_cs **siblings) in select_siblings() argument
3864 return __select_siblings(gt, class, siblings, NULL); in select_siblings()
3870 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; in live_virtual_engine() local
3891 nsibling = select_siblings(gt, class, siblings); in live_virtual_engine()
3896 err = nop_virtual_engine(gt, siblings, nsibling, in live_virtual_engine()
3902 err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN); in live_virtual_engine()
[all …]
Dintel_execlists_submission.c198 struct intel_engine_cs *siblings[]; member
208 execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
629 * We have to kick all the siblings again in case we need to in __execlists_schedule_out()
1045 if (likely(engine == ve->siblings[0])) in virtual_xfer_context()
1059 if (ve->siblings[n] == engine) { in virtual_xfer_context()
1060 swap(ve->siblings[n], ve->siblings[0]); in virtual_xfer_context()
1434 str_yes_no(engine != ve->siblings[0])); in execlists_dequeue()
1456 * ve->siblings[] on an idle context, where in execlists_dequeue()
1457 * we may be using ve->siblings[] in in execlists_dequeue()
1461 GEM_BUG_ON(ve->siblings[0] != engine); in execlists_dequeue()
[all …]
/linux-6.15/include/uapi/linux/
Dmembarrier.h70 * threads siblings have passed through a state
94 * call, that all its running threads siblings
120 * siblings have any currently running rseq
/linux-6.15/arch/s390/kernel/
Dhiperdispatch.c109 const struct cpumask *siblings; in hd_add_core() local
114 siblings = topology_sibling_cpumask(cpu); in hd_add_core()
121 cpumask_or(&hd_vmvl_cpumask, &hd_vmvl_cpumask, siblings); in hd_add_core()
125 cpumask_or(&hd_vmvl_cpumask, &hd_vmvl_cpumask, siblings); in hd_add_core()
/linux-6.15/tools/testing/selftests/drivers/platform/x86/intel/ifs/
Dtest_ifs.sh32 readonly SIBLINGS="siblings"
381 "$SIBLINGS")
446 ifs_test_cpus "$SIBLINGS" "$IFS_SCAN_MODE" "$image_id"
485 ifs_test_cpus "$SIBLINGS" "$IFS_ARRAY_BIST_SCAN_MODE"
/linux-6.15/mm/
Dpage_counter.c297 * parent's and siblings' settings, as well as the actual memory
317 * the unused remainder is available to its siblings.
334 * against immediate siblings whereas 5. is about protecting against
355 * but unused protection is available to siblings that would in effective_protection()
374 * consumed among the siblings anyway. in effective_protection()
/linux-6.15/Documentation/devicetree/bindings/net/dsa/
Dmarvell,mv88e6xxx.yaml28 This switch and its siblings will be autodetected from
34 This switch and its siblings will be autodetected from
39 This switch and its siblings will be autodetected from
/linux-6.15/tools/testing/selftests/landlock/
Dscoped_base_variants.h69 * Parent + child domain (siblings)
139 * Inherited + parent and child domain (siblings)
/linux-6.15/kernel/
DKconfig.preempt140 selection across SMT siblings. When enabled -- see
141 prctl(PR_SCHED_CORE) -- task selection ensures that all SMT siblings
/linux-6.15/drivers/nvme/host/
Dmultipath.c229 list_for_each_entry_srcu(ns, &head->list, siblings, in nvme_mpath_revalidate_paths()
263 list_for_each_entry_srcu(ns, &head->list, siblings, in __nvme_find_path()
302 ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns, in nvme_next_ns()
303 siblings); in nvme_next_ns()
306 return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings); in nvme_next_ns()
363 list_for_each_entry_srcu(ns, &head->list, siblings, in nvme_queue_depth_path()
432 list_for_each_entry_srcu(ns, &head->list, siblings, in nvme_available_path()
1053 list_for_each_entry_rcu(ns, &head->list, siblings) { in nvme_mpath_add_sysfs_link()
/linux-6.15/arch/x86/power/
Dhibernate.c194 * that SMT siblings are sleeping in hlt, as mwait is not safe in arch_resume_nosmt()
199 * SMT siblings out of hlt, and offline them again so that they in arch_resume_nosmt()
/linux-6.15/drivers/iommu/
Dfsl_pamu_domain.h22 struct list_head link; /* link to domain siblings */
/linux-6.15/arch/sparc/include/asm/
Doplib_32.h118 * siblings exist.
145 /* Search all siblings starting at the passed node for "name" matching
/linux-6.15/tools/perf/util/
Ddwarf-aux.h74 DIE_FIND_CB_SIBLING = 2, /* Search only siblings */
75 DIE_FIND_CB_CONTINUE = 3, /* Search children and siblings */
/linux-6.15/arch/powerpc/platforms/pseries/
Dsmp.c206 /* Doorbells can only be used for IPIs between SMT siblings */ in pSeries_smp_probe()
232 * Under PowerVM, FSCR[MSGP] is enabled as guest vCPU siblings are in pSeries_smp_probe()

1 2 3 4 5 6 7 8 9