/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. Otherwise, if local irqs are disabled
 * around the loop, we can deadlock, since retry() would keep comparing the
 * latest value of the mems_allowed seqcount against 0 while begin() still
 * sees cpusets_enabled() as false. The enabled -> disabled transition
 * should happen in the reverse order for the same reason (we want retry()
 * to stop looking at the real value of mems_allowed.sequence first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
extern struct static_key_false cpusets_insane_config_key;

static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

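/*
 * cpuset_inc()/cpuset_dec() implement the ordering documented above:
 * cpusets_pre_enable_key (checked by read_mems_allowed_begin()) flips
 * on before cpusets_enabled_key (checked by read_mems_allowed_retry())
 * and flips off after it.
 */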
static inline void cpuset_inc(void)
{
	static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
	static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec_cpuslocked(&cpusets_enabled_key);
	static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

/*
 * This will get enabled whenever a cpuset configuration is considered
 * unsupportable in general, e.g. a movable-only node which cannot satisfy
 * any non-movable allocations (see update_nodemask). The page allocator
 * needs to make additional checks for those configurations, and this
 * check is meant to guard those checks without any overhead for sane
 * configurations.
 */
static inline bool cpusets_insane_config(void)
{
	return static_branch_unlikely(&cpusets_insane_config_key);
}
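/*
 * An illustrative caller-side sketch (hypothetical helper, not the exact
 * page allocator code): pay for the extra sanity check only when such a
 * configuration is actually active.
 *
 *	if (cpusets_insane_config() && !(gfp_mask & __GFP_MOVABLE))
 *		check_unsupported_config(gfp_mask);
 */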

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern bool cpuset_cpu_is_isolated(int cpu);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

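/*
 * Fast-path wrapper: with no cpusets in use, cpusets_enabled() compiles
 * down to a runtime-patched NOP (jump label), so cpuset_zone_allowed()
 * simply returns true with essentially no overhead in the common case.
 */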
static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#ifdef CONFIG_CPUSETS_V1
#define cpuset_memory_pressure_bump() \
	do { \
		if (cpuset_memory_pressure_enabled) \
			__cpuset_memory_pressure_bump(); \
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
#else
static inline void cpuset_memory_pressure_bump(void) { }
#endif

extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be updated
 * in parallel and, depending on the new value, an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
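/*
 * Typical usage is a retry loop around the operation (a sketch;
 * try_alloc() is a stand-in for the real allocation attempt):
 *
 *	unsigned int cookie;
 *	struct page *page;
 *
 *	do {
 *		cookie = read_mems_allowed_begin();
 *		page = try_alloc(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cookie));
 */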

/*
 * Writer side of mems_allowed_seq: task_lock() serialises updates to
 * current->mems_allowed, and interrupts are disabled so that a reader
 * in interrupt context on this CPU cannot spin on an odd sequence
 * count while the write section is open.
 */
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline bool cpusets_insane_config(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
static inline void cpuset_unlock(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	return false;
}

static inline bool cpuset_cpu_is_isolated(int cpu)
{
	return false;
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
	return false;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */