/linux/tools/testing/selftests/bpf/prog_tests/

  cgroup_hierarchical_stats.c
      54  } cgroups[] = {  variable
      64  #define N_CGROUPS ARRAY_SIZE(cgroups)
     133  fd = create_and_get_cgroup(cgroups[i].path);  in setup_cgroups()
     137  cgroups[i].fd = fd;  in setup_cgroups()
     138  cgroups[i].id = get_cgroup_id(cgroups[i].path);  in setup_cgroups()
     147  close(cgroups[i].fd);  in cleanup_cgroups()
     175  if (join_parent_cgroup(cgroups[i].path))  in attach_processes()
     220  attach_counters[i] = get_attach_counter(cgroups[i].id,  in check_attach_counters()
     221  cgroups[i].name);  in check_attach_counters()
     288  err = setup_cgroup_iter(*skel, cgroups[i].fd, cgroups[i].name);  in setup_progs()
     [all …]
|
/linux/tools/cgroup/

  memcg_shrinker.py
      11  cgroups = {}
      17  cgroups[ino] = path
      20  return cgroups
      44  cgroups = scan_cgroups("/sys/fs/cgroup/")
      58  cg = cgroups[ino]
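  The Python above builds a dictionary mapping each cgroup directory's inode
  number to its path. A rough C equivalent of that scan, assuming the
  hierarchy is mounted at the conventional /sys/fs/cgroup/ (a sketch of the
  idea, not the tool's actual implementation):

    /* Walk the cgroup filesystem and print inode -> path for every
     * cgroup directory, mirroring scan_cgroups() in memcg_shrinker.py. */
    #define _XOPEN_SOURCE 500
    #include <ftw.h>
    #include <stdio.h>

    static int visit(const char *path, const struct stat *sb,
                     int type, struct FTW *ftw)
    {
            if (type == FTW_D)      /* every cgroup is a directory */
                    printf("%llu -> %s\n",
                           (unsigned long long)sb->st_ino, path);
            return 0;
    }

    int main(void)
    {
            return nftw("/sys/fs/cgroup/", visit, 16, FTW_PHYS) ? 1 : 0;
    }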
|
/linux/Documentation/admin-guide/cgroup-v1/

  cgroups.rst
      21  1.1 What are cgroups ?
      22  1.2 Why are cgroups needed ?
      23  1.3 How are cgroups implemented ?
      26  1.6 How do I use cgroups ?
      41  1.1 What are cgroups ?
      54  facilities provided by cgroups to treat groups of tasks in
      60  A *hierarchy* is a set of cgroups arranged in a tree, such that
      61  every task in the system is in exactly one of the cgroups in the
      67  cgroups. Each hierarchy is a partition of all tasks in the system.
      69  User-level code may create and destroy cgroups by name in an
      [all …]
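  The "How do I use cgroups?" section these matches come from boils down to
  plain filesystem operations. A hedged sketch against a cgroup-v1
  hierarchy (the mount point and group name are illustrative):

    /* Create a cgroup with mkdir() and attach the current task by
     * writing its PID to the group's "tasks" file; an empty group is
     * destroyed later with rmdir(). */
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
            FILE *f;

            if (mkdir("/sys/fs/cgroup/cpu/demo", 0755))
                    return 1;
            f = fopen("/sys/fs/cgroup/cpu/demo/tasks", "w");
            if (!f)
                    return 1;
            fprintf(f, "%d\n", getpid());
            fclose(f);
            return 0;
    }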
|
  net_cls.rst
       9  different priorities to packets from different cgroups.
      13  Creating a net_cls cgroups instance creates a net_cls.classid file.
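  The net_cls.classid file the second match mentions takes a single value
  of the form 0xAAAABBBB, i.e. the tc handle AAAA:BBBB. A minimal sketch
  that tags a group's packets with class 10:1 (the cgroup path is
  illustrative):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/fs/cgroup/net_cls/demo/net_cls.classid",
                            "w");

            if (!f)
                    return 1;
            fprintf(f, "0x100001\n");       /* tc class 10:1 */
            fclose(f);
            return 0;
    }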
|
  devices.rst
      43  Any task can move itself between cgroups. This clearly won't
      60  device cgroups maintain hierarchy by making sure a cgroup never has more
     121  not be possible once the device cgroups has children.
     126  device cgroups is implemented internally using a behavior (ALLOW, DENY) and a
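  The behavior-plus-whitelist model in the last match is driven by writing
  rules of the form "<type> <major>:<minor> <access>" to devices.allow or
  devices.deny. A hedged sketch (the cgroup path is illustrative) revoking
  read access to /dev/null, which is char device 1:3:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/fs/cgroup/devices/demo/devices.deny",
                            "w");

            if (!f)
                    return 1;
            fprintf(f, "c 1:3 r\n");        /* char 1:3, read access */
            fclose(f);
            return 0;
    }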
|
  freezer-subsystem.rst
       9  whole. The cgroup freezer uses cgroups to describe the set of tasks to
      57  tasks belonging to the cgroup and all its descendant cgroups. Each
      73  to the cgroup or one of its descendant cgroups until the new task is
      79  descendant cgroups.
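  Freezing the cgroup and all its descendant cgroups, as the matches
  describe, goes through the group's freezer.state file. A minimal sketch
  with an illustrative path:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/fs/cgroup/freezer/demo/freezer.state",
                            "w");

            if (!f)
                    return 1;
            fprintf(f, "FROZEN\n");         /* write THAWED to resume */
            fclose(f);
            return 0;
    }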
|
  index.rst
      10  cgroups
|
  memory.rst
     293  The reclaim algorithm has not been modified for cgroups, except that
     332  Kernel memory accounting is enabled for all memory cgroups by default. But
     409  2. Prepare the cgroups (see :ref:`Why are cgroups needed?
     410  <cgroups-why-needed>` for the background information)::
     617  (Note: file and shmem may be shared among other cgroups. In that case,
     681  The hierarchy is created by creating the appropriate cgroups in the
     745  reclaiming memory for balancing between memory cgroups
     764  Memory cgroup implements memory thresholds using the cgroups notification
     765  API (see cgroups.txt). It allows to register multiple memory and memsw
     790  API (See cgroups.txt). It allows to register multiple OOM notification
     [all …]
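  The threshold matches refer to the eventfd-based cgroup notification
  API. A hedged sketch of the registration handshake (the paths and the
  64 MiB threshold are illustrative):

    /* Register an eventfd against memory.usage_in_bytes by writing
     * "<event_fd> <usage_fd> <threshold>" to cgroup.event_control,
     * then block until usage crosses the threshold. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    int main(void)
    {
            int efd = eventfd(0, 0);
            int ufd = open("/sys/fs/cgroup/memory/demo/memory.usage_in_bytes",
                           O_RDONLY);
            int cfd = open("/sys/fs/cgroup/memory/demo/cgroup.event_control",
                           O_WRONLY);
            char buf[64];
            uint64_t ticks;

            if (efd < 0 || ufd < 0 || cfd < 0)
                    return 1;
            snprintf(buf, sizeof(buf), "%d %d %llu", efd, ufd, 64ULL << 20);
            if (write(cfd, buf, strlen(buf)) < 0)
                    return 1;
            read(efd, &ticks, sizeof(ticks));   /* wakes on threshold */
            return 0;
    }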
|
/linux/tools/perf/util/

  cgroup.c
     562  down_write(&env->cgroups.lock);  in cgroup__findnew()
     563  cgrp = __cgroup__findnew(&env->cgroups.tree, id, true, path);  in cgroup__findnew()
     564  up_write(&env->cgroups.lock);  in cgroup__findnew()
     577  down_read(&env->cgroups.lock);  in cgroup__find()
     578  cgrp = __cgroup__findnew(&env->cgroups.tree, id, false, NULL);  in cgroup__find()
     579  up_read(&env->cgroups.lock);  in cgroup__find()
     588  down_write(&env->cgroups.lock);  in perf_env__purge_cgroups()
     589  while (!RB_EMPTY_ROOT(&env->cgroups.tree)) {  in perf_env__purge_cgroups()
     590  node = rb_first(&env->cgroups.tree);  in perf_env__purge_cgroups()
     593  rb_erase(node, &env->cgroups.tree);  in perf_env__purge_cgroups()
     [all …]
|
  bpf-trace-summary.c
      23  static struct rb_root cgroups = RB_ROOT;  variable
      54  read_all_cgroups(&cgroups);  in trace_prepare_bpf_summary()
     345  struct cgroup *cgrp = __cgroup__find(&cgroups, data->key);  in print_cgroup_stat()
     454  if (!RB_EMPTY_ROOT(&cgroups)) {  in trace_cleanup_bpf_summary()
     457  rbtree_postorder_for_each_entry_safe(cgrp, tmp, &cgroups, node)  in trace_cleanup_bpf_summary()
     460  cgroups = RB_ROOT;  in trace_cleanup_bpf_summary()
|
  cgroup.h
      31  int evlist__expand_cgroup(struct evlist *evlist, const char *cgroups, bool open_cgroup);
|
  bpf_lock_contention.c
     384  read_all_cgroups(&con->cgroups);  in lock_contention_prepare()
     622  struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);  in lock_contention_get_name()
     840  while (!RB_EMPTY_ROOT(&con->cgroups)) {  in lock_contention_finish()
     841  struct rb_node *node = rb_first(&con->cgroups);  in lock_contention_finish()
     844  rb_erase(node, &con->cgroups);  in lock_contention_finish()
|
/linux/tools/testing/selftests/bpf/progs/

  percpu_alloc_cgrp_local_storage.c
      30  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,  in BPF_PROG()
      56  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);  in BPF_PROG()
      89  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);  in BPF_PROG()
|
  rcu_read_lock.c
      34  struct css_set *cgroups;  in get_cgroup_id() local
      42  cgroups = task->cgroups;  in get_cgroup_id()
      43  if (!cgroups)  in get_cgroup_id()
      45  cgroup_id = cgroups->dfl_cgrp->kn->id;  in get_cgroup_id()
|
  cgrp_ls_recursion.c
      59  __on_update(task->cgroups->dfl_cgrp);  in BPF_PROG()
      92  __on_enter(regs, id, task->cgroups->dfl_cgrp);  in BPF_PROG()
|
  cgrp_ls_sleepable.c
      86  __no_rcu_lock(task->cgroups->dfl_cgrp);  in no_rcu_lock()
     118  cgrp = task->cgroups->dfl_cgrp;  in yes_rcu_lock()
|
  cgrp_ls_tp_btf.c
      86  __on_enter(regs, id, task->cgroups->dfl_cgrp);  in BPF_PROG()
     124  __on_exit(regs, id, task->cgroups->dfl_cgrp);  in BPF_PROG()
|
/linux/Documentation/admin-guide/

  cgroup-v2.rst
     109  multiple individual control groups, the plural form "cgroups" is used.
     126  cgroups form a tree structure and every process in the system belongs
     136  processes which belong to the cgroups consisting the inclusive
     212  propagation into leaf cgroups. This allows protecting entire
     267  A given cgroup may have multiple child cgroups forming a tree
     333  different cgroups and are not subject to the no internal process
     334  constraint - threaded controllers can be enabled on non-leaf cgroups
     340  can't have populated child cgroups which aren't threaded. Because the
     342  serve both as a threaded domain and a parent to domain cgroups.
     406  between threads in a non-leaf cgroup and its child cgroups. Each
     [all …]
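  The tree and threaded-controller rules in these matches are manipulated
  through two control files: controllers are delegated to children via
  cgroup.subtree_control, and a cgroup joins a threaded subtree by writing
  "threaded" to its cgroup.type. A hedged sketch, with illustrative paths:

    #include <stdio.h>

    static int write_str(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fprintf(f, "%s\n", val);
            return fclose(f);
    }

    int main(void)
    {
            /* enable cpu and memory for the children of "parent" */
            if (write_str("/sys/fs/cgroup/parent/cgroup.subtree_control",
                          "+cpu +memory"))
                    return 1;
            /* turn a child into a threaded cgroup */
            return write_str("/sys/fs/cgroup/parent/child/cgroup.type",
                             "threaded") ? 1 : 0;
    }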
|
/linux/block/

  Kconfig.iosched
      38  (cgroups-v1) or io (cgroups-v2) controller.
|
/linux/tools/perf/Documentation/

  perf-bench.txt
     128  --cgroups=::
     129  Names of cgroups for sender and receiver, separated by a comma.
     131  Note that perf doesn't create nor delete the cgroups, so users should
     132  make sure that the cgroups exist and are accessible before use.
     154  (executing 1000000 pipe operations between cgroups)
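  Per these matches, the option is used along the lines of
  perf bench sched pipe -G AAA,BBB (AAA and BBB are placeholder group
  names): the sender runs in the first cgroup, the receiver in the second,
  and both must already exist under the mounted hierarchy.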
|
/linux/Documentation/gpu/

  drm-compute.rst
      38  controlling resources. The standard kernel way of doing so is cgroups.
      40  This creates a third option, using cgroups to prevent eviction. Both GPU and
      43  into cgroups, that will allow jobs to run next to each other without
|
/linux/Documentation/bpf/

  map_cgrp_storage.rst
       9  storage for cgroups. It is only available with ``CONFIG_CGROUPS``.
      56  ptr = bpf_cgrp_storage_get(&cgrp_storage, task->cgroups->dfl_cgrp, 0,
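  The second match shows the helper call in context. A minimal BPF-side
  sketch around it, following the pattern visible in the selftests above
  (the section name and counting semantics are illustrative):

    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    struct {
            __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
            __uint(map_flags, BPF_F_NO_PREALLOC);
            __type(key, int);
            __type(value, long);
    } cgrp_storage SEC(".maps");

    SEC("tp_btf/sys_enter")
    int BPF_PROG(on_enter, struct pt_regs *regs, long id)
    {
            struct task_struct *task = bpf_get_current_task_btf();
            long *ptr;

            /* fetch (or create) the slot for this task's cgroup */
            ptr = bpf_cgrp_storage_get(&cgrp_storage,
                                       task->cgroups->dfl_cgrp, 0,
                                       BPF_LOCAL_STORAGE_GET_F_CREATE);
            if (ptr)
                    __sync_fetch_and_add(ptr, 1);
            return 0;
    }

    char _license[] SEC("license") = "GPL";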
|
  map_cgroup_storage.rst
      10  attach to cgroups; the programs are made available by the same Kconfig. The
      16  cgroups on their own.
     132  that uses the map. A program may be attached to multiple cgroups or have
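  Unlike BPF_MAP_TYPE_CGRP_STORAGE above, this older map type is implicitly
  keyed by the cgroup the program is attached to, so the program simply asks
  for "its" storage. A minimal sketch assuming a cgroup_skb attachment (the
  byte counting is illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
            __type(key, struct bpf_cgroup_storage_key);
            __type(value, __u64);
    } cgroup_storage SEC(".maps");

    SEC("cgroup_skb/egress")
    int count_egress(struct __sk_buff *skb)
    {
            /* storage for the attached cgroup; no explicit key needed */
            __u64 *bytes = bpf_get_local_storage(&cgroup_storage, 0);

            __sync_fetch_and_add(bytes, skb->len);
            return 1;                       /* allow the packet */
    }

    char _license[] SEC("license") = "GPL";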
|
/linux/include/linux/

  psi.h
      63  rcu_assign_pointer(p->cgroups, to);  in cgroup_move_task()
|
/linux/tools/perf/util/bpf_skel/

  off_cpu.bpf.c
     155  return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);  in get_cgroup_id()
     166  cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_subsys_id], cgroup);  in get_cgroup_id()
|