/linux-6.12.1/tools/testing/selftests/bpf/prog_tests/ |
D | cgroup_hierarchical_stats.c |
      54  } cgroups[] = {    (variable)
      64  #define N_CGROUPS ARRAY_SIZE(cgroups)
     133  fd = create_and_get_cgroup(cgroups[i].path);  in setup_cgroups()
     137  cgroups[i].fd = fd;  in setup_cgroups()
     138  cgroups[i].id = get_cgroup_id(cgroups[i].path);  in setup_cgroups()
     147  close(cgroups[i].fd);  in cleanup_cgroups()
     175  if (join_parent_cgroup(cgroups[i].path))  in attach_processes()
     220  attach_counters[i] = get_attach_counter(cgroups[i].id,  in check_attach_counters()
     221  cgroups[i].name);  in check_attach_counters()
     288  err = setup_cgroup_iter(*skel, cgroups[i].fd, cgroups[i].name);  in setup_progs()
     [all …]
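The selftest above relies on helpers from the BPF selftest harness (create_and_get_cgroup(), get_cgroup_id(), join_parent_cgroup()). As a rough standalone sketch of what the first two boil down to, the C program below creates a cgroup directory and reads its 64-bit id out of the directory's file handle; the /sys/fs/cgroup/test_stats path is a made-up example and error handling is minimal.

#define _GNU_SOURCE		/* for name_to_handle_at() and struct file_handle */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>

/* The 64-bit cgroup id is carried in the directory's file handle; copy it
 * out of the handle filled in by name_to_handle_at(). */
static unsigned long long cgroup_id(const char *path)
{
	struct file_handle *fh;
	unsigned long long id = 0;
	int mount_id;

	fh = malloc(sizeof(*fh) + sizeof(id));
	if (!fh)
		return 0;
	fh->handle_bytes = sizeof(id);
	if (name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0) == 0)
		memcpy(&id, fh->f_handle, sizeof(id));
	free(fh);
	return id;
}

int main(void)
{
	const char *cg = "/sys/fs/cgroup/test_stats";	/* hypothetical cgroup */

	if (mkdir(cg, 0755) && errno != EEXIST) {
		perror("mkdir");
		return 1;
	}
	printf("%s id=%llu\n", cg, cgroup_id(cg));
	return 0;
}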
|
/linux-6.12.1/tools/cgroup/ |
D | memcg_shrinker.py |
      11  cgroups = {}
      17  cgroups[ino] = path
      20  return cgroups
      44  cgroups = scan_cgroups("/sys/fs/cgroup/")
      58  cg = cgroups[ino]
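The script builds a dictionary from inode number to cgroup path so that numeric ids seen elsewhere (for example in shrinker debugfs output) can be resolved back to a cgroup. The original tool is Python; a comparable sketch of the same scan in C, assuming the usual /sys/fs/cgroup mount point, could look like this:

#define _GNU_SOURCE		/* exposes nftw() */
#include <ftw.h>
#include <stdio.h>
#include <sys/stat.h>

/* Print "<inode> <path>" for every cgroup directory, mirroring the
 * ino -> path dictionary the Python tool builds. */
static int visit(const char *path, const struct stat *st, int type,
		 struct FTW *ftw)
{
	if (type == FTW_D)
		printf("%llu %s\n", (unsigned long long)st->st_ino, path);
	return 0;	/* keep walking */
}

int main(void)
{
	return nftw("/sys/fs/cgroup", visit, 16, FTW_PHYS) ? 1 : 0;
}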
|
/linux-6.12.1/Documentation/admin-guide/cgroup-v1/ |
D | cgroups.rst |
      21  1.1 What are cgroups ?
      22  1.2 Why are cgroups needed ?
      23  1.3 How are cgroups implemented ?
      26  1.6 How do I use cgroups ?
      41  1.1 What are cgroups ?
      54  facilities provided by cgroups to treat groups of tasks in
      60  A *hierarchy* is a set of cgroups arranged in a tree, such that
      61  every task in the system is in exactly one of the cgroups in the
      67  cgroups. Each hierarchy is a partition of all tasks in the system.
      69  User-level code may create and destroy cgroups by name in an
     [all …]
|
D | net_cls.rst |
       9  different priorities to packets from different cgroups.
      13  Creating a net_cls cgroups instance creates a net_cls.classid file.
|
D | devices.rst |
      43  Any task can move itself between cgroups. This clearly won't
      60  device cgroups maintain hierarchy by making sure a cgroup never has more
     121  not be possible once the device cgroups has children.
     126  device cgroups is implemented internally using a behavior (ALLOW, DENY) and a
|
D | freezer-subsystem.rst |
       9  whole. The cgroup freezer uses cgroups to describe the set of tasks to
      57  tasks belonging to the cgroup and all its descendant cgroups. Each
      73  to the cgroup or one of its descendant cgroups until the new task is
      79  descendant cgroups.
|
D | index.rst | 10 cgroups
|
D | memory.rst |
     298  The reclaim algorithm has not been modified for cgroups, except that
     337  Kernel memory accounting is enabled for all memory cgroups by default. But
     414  2. Prepare the cgroups (see :ref:`Why are cgroups needed?
     415  <cgroups-why-needed>` for the background information)::
     618  (Note: file and shmem may be shared among other cgroups. In that case,
     682  The hierarchy is created by creating the appropriate cgroups in the
     746  reclaiming memory for balancing between memory cgroups
     761  cgroups to allow fine-grained policy adjustments without having to
     835  Memory cgroup implements memory thresholds using the cgroups notification
     836  API (see cgroups.txt). It allows to register multiple memory and memsw
     [all …]
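The matches at lines 835-836 refer to the cgroup-v1 eventfd-based notification interface. A hedged sketch of how a user-space program registers a memory threshold through it is shown below; the cgroup path and the 64 MiB threshold are assumptions, and only the happy path is handled.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	const char *cg = "/sys/fs/cgroup/memory/mygroup";	/* hypothetical group */
	char buf[128];
	uint64_t ticks;
	int efd, ufd, cfd;

	efd = eventfd(0, 0);
	snprintf(buf, sizeof(buf), "%s/memory.usage_in_bytes", cg);
	ufd = open(buf, O_RDONLY);
	snprintf(buf, sizeof(buf), "%s/cgroup.event_control", cg);
	cfd = open(buf, O_WRONLY);
	if (efd < 0 || ufd < 0 || cfd < 0)
		return 1;

	/* "<eventfd> <fd of memory.usage_in_bytes> <threshold in bytes>" */
	snprintf(buf, sizeof(buf), "%d %d %llu", efd, ufd,
		 (unsigned long long)(64 << 20));
	if (write(cfd, buf, strlen(buf)) < 0)
		return 1;

	/* blocks until usage crosses the 64 MiB threshold */
	if (read(efd, &ticks, sizeof(ticks)) == sizeof(ticks))
		printf("memory threshold crossed\n");
	return 0;
}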
|
/linux-6.12.1/tools/perf/util/ |
D | cgroup.c |
     569  down_write(&env->cgroups.lock);  in cgroup__findnew()
     570  cgrp = __cgroup__findnew(&env->cgroups.tree, id, true, path);  in cgroup__findnew()
     571  up_write(&env->cgroups.lock);  in cgroup__findnew()
     584  down_read(&env->cgroups.lock);  in cgroup__find()
     585  cgrp = __cgroup__findnew(&env->cgroups.tree, id, false, NULL);  in cgroup__find()
     586  up_read(&env->cgroups.lock);  in cgroup__find()
     595  down_write(&env->cgroups.lock);  in perf_env__purge_cgroups()
     596  while (!RB_EMPTY_ROOT(&env->cgroups.tree)) {  in perf_env__purge_cgroups()
     597  node = rb_first(&env->cgroups.tree);  in perf_env__purge_cgroups()
     600  rb_erase(node, &env->cgroups.tree);  in perf_env__purge_cgroups()
     [all …]
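The snippets show perf keeping cgroup entries in a tree keyed by id, taking the lock for write in the find-or-create and purge paths and for read in the lookup-only path. The sketch below reproduces the same idea in plain user space, with pthread_rwlock and POSIX tsearch()/tfind() standing in for the kernel-style rb_tree helpers; it is an illustration of the pattern, not perf's implementation.

#include <pthread.h>
#include <search.h>
#include <stdint.h>
#include <stdlib.h>

struct cg_entry {
	uint64_t id;		/* 64-bit cgroup id used as the tree key */
};

static void *tree_root;
static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;

static int cmp(const void *a, const void *b)
{
	const struct cg_entry *x = a, *y = b;

	return (x->id > y->id) - (x->id < y->id);
}

/* writer path: insert a new entry unless the id is already present */
struct cg_entry *cg_findnew(uint64_t id)
{
	struct cg_entry *e, *found = NULL, **slot;

	e = malloc(sizeof(*e));
	if (!e)
		return NULL;
	e->id = id;

	pthread_rwlock_wrlock(&tree_lock);
	slot = tsearch(e, &tree_root, cmp);
	if (slot)
		found = *slot;
	pthread_rwlock_unlock(&tree_lock);

	if (found != e)
		free(e);	/* the id was already in the tree (or insert failed) */
	return found;
}

/* reader path: lookup only, never creates */
struct cg_entry *cg_find(uint64_t id)
{
	struct cg_entry key = { .id = id }, **slot;

	pthread_rwlock_rdlock(&tree_lock);
	slot = tfind(&key, &tree_root, cmp);
	pthread_rwlock_unlock(&tree_lock);
	return slot ? *slot : NULL;
}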
|
D | bpf_lock_contention.c |
     177  read_all_cgroups(&con->cgroups);  in lock_contention_prepare()
     373  struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);  in lock_contention_get_name()
     534  while (!RB_EMPTY_ROOT(&con->cgroups)) {  in lock_contention_finish()
     535  struct rb_node *node = rb_first(&con->cgroups);  in lock_contention_finish()
     538  rb_erase(node, &con->cgroups);  in lock_contention_finish()
|
D | cgroup.h | 31 int evlist__expand_cgroup(struct evlist *evlist, const char *cgroups,
|
D | lock-contention.h | 141 struct rb_root cgroups; member
|
/linux-6.12.1/tools/testing/selftests/bpf/progs/ |
D | percpu_alloc_cgrp_local_storage.c |
      30  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,  in BPF_PROG()
      56  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);  in BPF_PROG()
      89  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);  in BPF_PROG()
|
D | rcu_read_lock.c |
      33  struct css_set *cgroups;  in get_cgroup_id()  (local)
      41  cgroups = task->cgroups;  in get_cgroup_id()
      42  if (!cgroups)  in get_cgroup_id()
      44  cgroup_id = cgroups->dfl_cgrp->kn->id;  in get_cgroup_id()
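task->cgroups is an RCU-protected pointer, which is why the selftest brackets the walk to the default cgroup with the bpf_rcu_read_lock()/bpf_rcu_read_unlock() kfuncs. A minimal sketch of the same pattern is given below; the tp_btf/task_newtask attach point and the bpf_printk output are illustrative choices, and a vmlinux.h generated for the target kernel is assumed.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* kfuncs providing an explicit BPF-side RCU read-side critical section */
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;

char _license[] SEC("license") = "GPL";

SEC("tp_btf/task_newtask")
int BPF_PROG(report_new_task, struct task_struct *task, __u64 clone_flags)
{
	struct css_set *cgroups;
	__u64 cgroup_id = 0;

	bpf_rcu_read_lock();
	cgroups = task->cgroups;	/* __rcu pointer: only valid inside the lock */
	if (cgroups)
		cgroup_id = cgroups->dfl_cgrp->kn->id;
	bpf_rcu_read_unlock();

	bpf_printk("pid %d default cgroup id %llu", task->pid, cgroup_id);
	return 0;
}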
|
D | cgrp_ls_recursion.c |
      59  __on_update(task->cgroups->dfl_cgrp);  in BPF_PROG()
      92  __on_enter(regs, id, task->cgroups->dfl_cgrp);  in BPF_PROG()
|
D | cgrp_ls_tp_btf.c |
      86  __on_enter(regs, id, task->cgroups->dfl_cgrp);  in BPF_PROG()
     124  __on_exit(regs, id, task->cgroups->dfl_cgrp);  in BPF_PROG()
|
D | cgrp_ls_sleepable.c |
      87  __no_rcu_lock(task->cgroups->dfl_cgrp);  in no_rcu_lock()
     119  cgrp = task->cgroups->dfl_cgrp;  in yes_rcu_lock()
|
/linux-6.12.1/Documentation/admin-guide/ |
D | cgroup-v2.rst |
     103  multiple individual control groups, the plural form "cgroups" is used.
     120  cgroups form a tree structure and every process in the system belongs
     130  processes which belong to the cgroups consisting the inclusive
     206  propagation into leaf cgroups. This allows protecting entire
     261  A given cgroup may have multiple child cgroups forming a tree
     327  different cgroups and are not subject to the no internal process
     328  constraint - threaded controllers can be enabled on non-leaf cgroups
     334  can't have populated child cgroups which aren't threaded. Because the
     336  serve both as a threaded domain and a parent to domain cgroups.
     400  between threads in a non-leaf cgroup and its child cgroups. Each
     [all …]
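cgroup-v2 management happens through plain filesystem operations: a child cgroup is a directory, controllers are enabled by writing to the parent's cgroup.subtree_control, and a process is moved by writing its PID to cgroup.procs. The C sketch below walks through those three steps, assuming the unified hierarchy is mounted at /sys/fs/cgroup and the memory controller is available; the "demo" cgroup name is made up.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < (ssize_t)strlen(val) ? -1 : 0;
}

int main(void)
{
	char pid[32];

	/* a child cgroup is just a directory under the unified hierarchy */
	if (mkdir("/sys/fs/cgroup/demo", 0755) && errno != EEXIST)
		return 1;

	/* enable the memory controller for children of the root */
	if (write_str("/sys/fs/cgroup/cgroup.subtree_control", "+memory"))
		return 1;

	/* move this process into the new cgroup */
	snprintf(pid, sizeof(pid), "%d", getpid());
	return write_str("/sys/fs/cgroup/demo/cgroup.procs", pid);
}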
|
/linux-6.12.1/tools/perf/Documentation/ |
D | perf-bench.txt |
     128  --cgroups=::
     129  Names of cgroups for sender and receiver, separated by a comma.
     131  Note that perf doesn't create nor delete the cgroups, so users should
     132  make sure that the cgroups exist and are accessible before use.
     154  (executing 1000000 pipe operations between cgroups)
|
/linux-6.12.1/block/ |
D | Kconfig.iosched | 38 (cgroups-v1) or io (cgroups-v2) controller.
|
/linux-6.12.1/tools/perf/util/bpf_skel/ |
D | off_cpu.bpf.c |
     126  return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);  in get_cgroup_id()
     137  cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_subsys_id], cgroup);  in get_cgroup_id()
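BPF_CORE_READ() performs a relocatable (CO-RE) pointer walk, so the task->cgroups->dfl_cgrp->kn->id chain keeps working even if structure layouts change between kernels. A small sketch of the same read from a kprobe is shown below; the finish_task_switch attach point is only an example and a matching vmlinux.h is assumed.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

char _license[] SEC("license") = "GPL";

/* CO-RE walk to the default-hierarchy cgroup id (the kernfs node id) */
static __always_inline __u64 task_cgroup_id(struct task_struct *t)
{
	return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);
}

SEC("kprobe/finish_task_switch")
int BPF_KPROBE(on_switch, struct task_struct *prev)
{
	bpf_printk("prev cgroup id: %llu", task_cgroup_id(prev));
	return 0;
}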
|
/linux-6.12.1/Documentation/bpf/ |
D | map_cgrp_storage.rst |
       9  storage for cgroups. It is only available with ``CONFIG_CGROUPS``.
      56  ptr = bpf_cgrp_storage_get(&cgrp_storage, task->cgroups->dfl_cgrp, 0,
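In the spirit of the documented example, the sketch below declares a BPF_MAP_TYPE_CGRP_STORAGE map and bumps a per-cgroup counter on every syscall entry, creating the storage on first use with BPF_LOCAL_STORAGE_GET_F_CREATE. The counter semantics are an illustrative assumption.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, long);
} cgrp_storage SEC(".maps");

SEC("tp_btf/sys_enter")
int BPF_PROG(count_syscalls, struct pt_regs *regs, long id)
{
	struct task_struct *task = bpf_get_current_task_btf();
	long *ptr;

	ptr = bpf_cgrp_storage_get(&cgrp_storage, task->cgroups->dfl_cgrp, 0,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (ptr)
		__sync_fetch_and_add(ptr, 1);	/* per-cgroup syscall counter */
	return 0;
}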
|
D | map_cgroup_storage.rst |
      10  attach to cgroups; the programs are made available by the same Kconfig. The
      16  cgroups on their own.
     132  that uses the map. A program may be attached to multiple cgroups or have
|
/linux-6.12.1/include/linux/ |
D | psi.h | 63 rcu_assign_pointer(p->cgroups, to); in cgroup_move_task()
|
/linux-6.12.1/tools/perf/tests/shell/ |
D | record_bpf_filter.sh | 154 -a --all-cgroups --synth=cgroup -o "${perfdata}" true 2> /dev/null
|