/linux-6.12.1/kernel/cgroup/ |
D | cpuset-internal.h |
      74  struct cpuset {
     185  static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
     187          return css ? container_of(css, struct cpuset, css) : NULL;   [in css_cs()]
     191  static inline struct cpuset *task_cs(struct task_struct *task)
     196  static inline struct cpuset *parent_cs(struct cpuset *cs)
     202  static inline bool is_cpuset_online(struct cpuset *cs)
     207  static inline int is_cpu_exclusive(const struct cpuset *cs)
     212  static inline int is_mem_exclusive(const struct cpuset *cs)
     217  static inline int is_mem_hardwall(const struct cpuset *cs)
     222  static inline int is_sched_load_balance(const struct cpuset *cs)
     [all …]
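The css_cs() helper above recovers the enclosing struct cpuset from a pointer to its embedded
cgroup_subsys_state. Below is a minimal standalone sketch of that container_of idiom, using
invented stand-in types (fake_css, fake_cpuset) rather than the real kernel structures:

/*
 * Standalone sketch of the container_of idiom behind css_cs(): given a
 * pointer to an embedded member, recover the enclosing structure.
 * fake_css/fake_cpuset are illustrative stand-ins, not kernel types.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_css { int refcnt; };          /* stands in for cgroup_subsys_state */

struct fake_cpuset {                      /* stands in for struct cpuset */
	struct fake_css css;              /* embedded member, as in the kernel */
	int cpus_allowed;
};

/* NULL-safe member-to-container cast, mirroring the shape of css_cs(). */
static struct fake_cpuset *fake_css_cs(struct fake_css *css)
{
	return css ? container_of(css, struct fake_cpuset, css) : NULL;
}

int main(void)
{
	struct fake_cpuset cs = { .css = { .refcnt = 1 }, .cpus_allowed = 0xf };

	printf("recovered cpus_allowed = %#x\n", fake_css_cs(&cs.css)->cpus_allowed);
	return 0;
}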
|
D | cpuset.c |
     135          struct cpuset *cs = task_cs(p);   [in inc_dl_tasks_cs()]
     142          struct cpuset *cs = task_cs(p);   [in dec_dl_tasks_cs()]
     147  static inline int is_partition_valid(const struct cpuset *cs)
     152  static inline int is_partition_invalid(const struct cpuset *cs)
     160  static inline void make_partition_invalid(struct cpuset *cs)
     169  static inline void notify_partition_change(struct cpuset *cs, int old_prs)
     180  static struct cpuset top_cpuset = {
     270  static inline void dec_attach_in_progress_locked(struct cpuset *cs)
     279  static inline void dec_attach_in_progress(struct cpuset *cs)
     309  static inline bool partition_is_populated(struct cpuset *cs,
     [all …]
|
D | cpuset-v1.c |
      10          struct cpuset *cs;
     147  static int update_relax_domain_level(struct cpuset *cs, s64 val)
     167          struct cpuset *cs = css_cs(css);   [in cpuset_write_s64()]
     192          struct cpuset *cs = css_cs(css);   [in cpuset_read_s64()]
     212  void cpuset1_update_task_spread_flags(struct cpuset *cs,
     237  void cpuset1_update_tasks_flags(struct cpuset *cs)
     255  static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
     257          struct cpuset *parent;   [in remove_tasks_in_empty_cpuset()]
     285  void cpuset1_hotplug_update_tasks(struct cpuset *cs,
     339  static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
     [all …]
|
/linux-6.12.1/Documentation/admin-guide/cgroup-v1/ |
D | cpusets.rst |
      48  the resources within a task's current cpuset. They form a nested
      59  policy, are both filtered through that task's cpuset, filtering out any
      60  CPUs or Memory Nodes not in that cpuset. The scheduler will not
      67  cpusets and which CPUs and Memory Nodes are assigned to each cpuset,
      68  specify and query to which cpuset a task is assigned, and list the
      69  task pids assigned to a cpuset.
     103  The kernel cpuset patch provides the minimum essential kernel
     124  - Each task in the system is attached to a cpuset, via a pointer
     127    allowed in that task's cpuset.
     129    those Memory Nodes allowed in that task's cpuset.
     [all …]
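These excerpts describe the user-visible contract: every task is attached to one cpuset, its CPU
and memory placement is filtered through that cpuset, and the cpuset a task belongs to can be
queried. A small userspace sketch of that query path, assuming a kernel built with
CONFIG_PROC_PID_CPUSET (which exposes /proc/<pid>/cpuset):

/*
 * Ask which cpuset the calling task is attached to (/proc/self/cpuset)
 * and which CPUs that leaves it allowed to run on (sched_getaffinity()).
 * Assumes CONFIG_PROC_PID_CPUSET; error handling is intentionally minimal.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char path[256] = "(unknown)";
	cpu_set_t mask;
	FILE *f = fopen("/proc/self/cpuset", "r");

	if (f) {
		if (fgets(path, sizeof(path), f))
			path[strcspn(path, "\n")] = '\0';   /* strip trailing newline */
		fclose(f);
	}
	printf("cpuset: %s\n", path);

	if (sched_getaffinity(0, sizeof(mask), &mask) == 0) {
		printf("allowed CPUs:");
		for (int cpu = 0; cpu < CPU_SETSIZE; cpu++)
			if (CPU_ISSET(cpu, &mask))
				printf(" %d", cpu);
		printf("\n");
	}
	return 0;
}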
|
D | memcg_test.rst |
     165  For NUMA, migration is another special case. To do an easy test, cpuset
     168      mount -t cgroup -o cpuset none /opt/cpuset
     170      mkdir /opt/cpuset/01
     171      echo 1 > /opt/cpuset/01/cpuset.cpus
     172      echo 0 > /opt/cpuset/01/cpuset.mems
     173      echo 1 > /opt/cpuset/01/cpuset.memory_migrate
     174      mkdir /opt/cpuset/02
     175      echo 1 > /opt/cpuset/02/cpuset.cpus
     176      echo 1 > /opt/cpuset/02/cpuset.mems
     177      echo 1 > /opt/cpuset/02/cpuset.memory_migrate
     [all …]
|
/linux-6.12.1/tools/testing/selftests/cgroup/ |
D | test_cpuset_prs.sh |
      25  SUBPARTS_CPUS=$CGROUP2/.__DEBUG__.cpuset.cpus.subpartitions
      26  CPULIST=$(cat $CGROUP2/cpuset.cpus.effective)
      71  echo +cpuset > cgroup.subtree_control
      79  echo 0-6 > test/cpuset.cpus
      80  echo root > test/cpuset.cpus.partition
      81  cat test/cpuset.cpus.partition | grep -q invalid
      83  echo member > test/cpuset.cpus.partition
      84  echo "" > test/cpuset.cpus
      94  BOOT_ISOLCPUS=$(cat $CGROUP2/cpuset.cpus.isolated)
     136  echo $EXPECTED_VAL > cpuset.cpus.partition
     [all …]
|
D | test_cpuset_v1_hp.sh |
      17  CPUSET=$(mount -t cgroup | grep cpuset | head -1 | awk -e '{print $3}')
      25  echo 1 > $CPUSET/$TDIR/cpuset.cpus
      26  echo 0 > $CPUSET/$TDIR/cpuset.mems
      30  NEWCS=$(cat /proc/$TASK/cpuset)
      39  NEWCS=$(cat /proc/$TASK/cpuset)
|
/linux-6.12.1/tools/sched_ext/ |
D | scx_central.c |
      51          cpu_set_t *cpuset;   [local in main()]
      96          cpuset = CPU_ALLOC(skel->rodata->nr_cpu_ids);
      97          SCX_BUG_ON(!cpuset, "Failed to allocate cpuset");
      98          CPU_ZERO(cpuset);
      99          CPU_SET(skel->rodata->central_cpu, cpuset);
     100          SCX_BUG_ON(sched_setaffinity(0, sizeof(cpuset), cpuset),
     103          CPU_FREE(cpuset);
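scx_central.c, like several benchmark and selftest excerpts below, follows one pattern: allocate a
cpu_set_t sized for the machine's CPU count, set a single bit, and pin the calling thread. A
self-contained sketch of that pattern follows; the target CPU (0) and the use of sysconf() for the
CPU count are illustrative choices, not the scx_central code:

/*
 * Pin the calling thread to one CPU using a dynamically sized cpu_set_t.
 * CPU 0 is an arbitrary illustrative target.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long nrcpus = sysconf(_SC_NPROCESSORS_CONF);
	cpu_set_t *cpuset = CPU_ALLOC(nrcpus);
	size_t size = CPU_ALLOC_SIZE(nrcpus);

	if (!cpuset) {
		perror("CPU_ALLOC");
		return 1;
	}

	CPU_ZERO_S(size, cpuset);
	CPU_SET_S(0, size, cpuset);               /* allow only CPU 0 */

	if (sched_setaffinity(0, size, cpuset)) { /* pid 0 == calling thread */
		perror("sched_setaffinity");
		CPU_FREE(cpuset);
		return 1;
	}

	printf("pinned to CPU 0 (of %ld configured CPUs)\n", nrcpus);
	CPU_FREE(cpuset);
	return 0;
}

Note that the size passed to sched_setaffinity() comes from CPU_ALLOC_SIZE(), not sizeof(), since
the set is heap-allocated.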
|
/linux-6.12.1/Documentation/translations/zh_CN/scheduler/ |
D | sched-capacity.rst |
     259  … this key is visible system-wide. Imagine the following steps using cpusets::
     272      mkdir /sys/fs/cgroup/cpuset/cs0
     273      echo 0-1 > /sys/fs/cgroup/cpuset/cs0/cpuset.cpus
     274      echo 0 > /sys/fs/cgroup/cpuset/cs0/cpuset.mems
     276      mkdir /sys/fs/cgroup/cpuset/cs1
     277      echo 2-7 > /sys/fs/cgroup/cpuset/cs1/cpuset.cpus
     278      echo 0 > /sys/fs/cgroup/cpuset/cs1/cpuset.mems
     280      echo 0 > /sys/fs/cgroup/cpuset/cpuset.sched_load_balance
|
/linux-6.12.1/tools/testing/selftests/x86/ |
D | sysret_ss_attrs.c |
      55          cpu_set_t cpuset;   [in main()]
      56          CPU_ZERO(&cpuset);
      57          CPU_SET(0, &cpuset);
      58          if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
|
D | ioperm.c |
      90          cpu_set_t cpuset;   [in main()]
      91          CPU_ZERO(&cpuset);
      92          CPU_SET(0, &cpuset);
      93          if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
|
D | iopl.c |
     174          cpu_set_t cpuset;   [in main()]
     176          CPU_ZERO(&cpuset);
     177          CPU_SET(0, &cpuset);
     178          if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
|
/linux-6.12.1/tools/testing/selftests/powerpc/benchmarks/ |
D | fork.c |
      32          cpu_set_t cpuset;   [in set_cpu()]
      37          CPU_ZERO(&cpuset);
      38          CPU_SET(cpu, &cpuset);
      40          if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) {
     137          cpu_set_t cpuset;   [in bench_thread()]
     149          CPU_ZERO(&cpuset);
     150          CPU_SET(cpu, &cpuset);
     152          rc = pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
|
D | context_switch.c |
      78          cpu_set_t cpuset;   [in start_thread_on()]
      81          CPU_ZERO(&cpuset);
      82          CPU_SET(cpu, &cpuset);
      91          rc = pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
     109          cpu_set_t *cpuset;   [in start_process_on()]
     123          cpuset = CPU_ALLOC(ncpus);
     124          if (!cpuset) {
     128          CPU_ZERO_S(size, cpuset);
     129          CPU_SET_S(cpu, size, cpuset);
     131          if (sched_setaffinity(0, size, cpuset)) {
     [all …]
|
/linux-6.12.1/tools/testing/selftests/powerpc/tm/ |
D | tm-poison.c |
      29          cpu_set_t cpuset;   [in tm_poison_test()]
      42          CPU_ZERO(&cpuset);
      43          CPU_SET(cpu, &cpuset);
      44          FAIL_IF(sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0);
|
/linux-6.12.1/tools/perf/bench/ |
D | futex-wake.c |
     100          cpu_set_t *cpuset;   [in block_threads()]
     106          cpuset = CPU_ALLOC(nrcpus);
     107          BUG_ON(!cpuset);
     115          CPU_ZERO_S(size, cpuset);
     116          CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
     118          if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
     119          CPU_FREE(cpuset);
     124          CPU_FREE(cpuset);
     129          CPU_FREE(cpuset);
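The perf futex benchmarks (futex-wake.c and the entries that follow) pin each worker by attaching
a CPU mask to a pthread_attr_t before pthread_create(), rather than calling sched_setaffinity()
from inside the new thread. A self-contained sketch of that pattern; the round-robin placement and
the fixed worker count are illustrative choices, not the perf code:

/*
 * Create a few worker threads, each pinned to one CPU by setting the
 * affinity on the pthread attributes before pthread_create().
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

#define NR_WORKERS 4

static void *worker(void *arg)
{
	printf("worker %ld running on CPU %d\n", (long)arg, sched_getcpu());
	return NULL;
}

int main(void)
{
	long nrcpus = sysconf(_SC_NPROCESSORS_CONF);
	size_t size = CPU_ALLOC_SIZE(nrcpus);
	cpu_set_t *cpuset = CPU_ALLOC(nrcpus);
	pthread_t threads[NR_WORKERS];
	pthread_attr_t attr;
	long created = 0;

	if (!cpuset)
		return 1;
	pthread_attr_init(&attr);

	for (long i = 0; i < NR_WORKERS; i++) {
		CPU_ZERO_S(size, cpuset);
		CPU_SET_S(i % nrcpus, size, cpuset);    /* round-robin pin */
		if (pthread_attr_setaffinity_np(&attr, size, cpuset))
			break;
		if (pthread_create(&threads[i], &attr, worker, (void *)i))
			break;
		created++;
	}
	for (long i = 0; i < created; i++)
		pthread_join(threads[i], NULL);

	pthread_attr_destroy(&attr);
	CPU_FREE(cpuset);
	return 0;
}

Unlike a later sched_setaffinity() call, the mask attached to the attribute takes effect at thread
creation, so the worker never runs on an unintended CPU first.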
|
D | futex-lock-pi.c |
     123          cpu_set_t *cpuset;   [in create_threads()]
     130          cpuset = CPU_ALLOC(nrcpus);
     131          BUG_ON(!cpuset);
     147          CPU_ZERO_S(size, cpuset);
     148          CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
     150          if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
     151          CPU_FREE(cpuset);
     156          CPU_FREE(cpuset);
     161          CPU_FREE(cpuset);
|
D | futex-hash.c |
     126          cpu_set_t *cpuset;   [in bench_futex_hash()]
     178          cpuset = CPU_ALLOC(nrcpus);
     179          BUG_ON(!cpuset);
     188          CPU_ZERO_S(size, cpuset);
     190          CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
     191          ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
     193          CPU_FREE(cpuset);
     199          CPU_FREE(cpuset);
     204          CPU_FREE(cpuset);
|
D | futex-requeue.c |
     126          cpu_set_t *cpuset;   [in block_threads()]
     133          cpuset = CPU_ALLOC(nrcpus);
     134          BUG_ON(!cpuset);
     142          CPU_ZERO_S(size, cpuset);
     143          CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
     145          if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
     146          CPU_FREE(cpuset);
     151          CPU_FREE(cpuset);
     156          CPU_FREE(cpuset);
|
D | futex-wake-parallel.c |
     150          cpu_set_t *cpuset;   [in block_threads()]
     157          cpuset = CPU_ALLOC(nrcpus);
     158          BUG_ON(!cpuset);
     166          CPU_ZERO_S(size, cpuset);
     167          CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
     169          if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
     170          CPU_FREE(cpuset);
     175          CPU_FREE(cpuset);
     180          CPU_FREE(cpuset);
|
/linux-6.12.1/tools/testing/selftests/bpf/prog_tests/ |
D | timer_lockup.c |
      26          cpu_set_t cpuset;   [in timer_lockup_thread()]
      28          CPU_ZERO(&cpuset);
      29          CPU_SET(__sync_fetch_and_add(&cpu, 1), &cpuset);
      30          ASSERT_OK(pthread_setaffinity_np(pthread_self(), sizeof(cpuset),
      31                                           &cpuset),
|
D | test_overhead.c |
      49          cpu_set_t cpuset;   [in setaffinity()]
      52          CPU_ZERO(&cpuset);
      53          CPU_SET(cpu, &cpuset);
      54          sched_setaffinity(0, sizeof(cpuset), &cpuset);
|
/linux-6.12.1/Documentation/translations/zh_CN/mm/ |
D | hugetlbfs_reserv.rst |
     416   * When cpuset is configured, it breaks strict hugetlb page reservation, because the
     417   * accounting is done on a global variable. In the presence of cpusets such a reservation
     418   * is completely rubbish, since it is not checked against page availability in the current
     419   * cpuset. The application can still be OOM'ed by the kernel when the cpuset its task is in
     420   * runs out of free htlb pages. Trying to enforce strict accounting with cpusets is almost
           * impossible (or too ugly), because cpusets are too fluid: tasks and memory nodes can be
           * moved between cpusets dynamically. Sharing […] with cpusets
     422   * […] page availability, as a best attempt, hoping to minimize the impact of the semantic
           * change that cpusets bring.
|
/linux-6.12.1/tools/power/cpupower/bench/ |
D | system.c |
      77          cpu_set_t cpuset;   [in set_cpu_affinity()]
      79          CPU_ZERO(&cpuset);
      80          CPU_SET(cpu, &cpuset);
      84          if (sched_setaffinity(getpid(), sizeof(cpu_set_t), &cpuset) < 0) {
|
/linux-6.12.1/tools/testing/selftests/intel_pstate/ |
D | aperf.c |
      31          cpu_set_t cpuset;   [in main()]
      54          CPU_ZERO(&cpuset);
      55          CPU_SET(cpu, &cpuset);
      57          if (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset)) {
|