/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CPUMASK_H
#define __LINUX_CPUMASK_H

/*
 * Cpumasks provide a bitmap suitable for representing the
 * set of CPUs in a system, one bit position per CPU number.  In general,
 * only nr_cpu_ids (<= NR_CPUS) bits are valid.
 */
#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/cpumask_types.h>
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/gfp_types.h>
#include <linux/numa.h>

/**
 * cpumask_pr_args - printf args to output a cpumask
 * @maskp: cpumask to be printed
 *
 * Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
 */
#define cpumask_pr_args(maskp)		nr_cpu_ids, cpumask_bits(maskp)
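
/*
 * Example (illustrative, not part of this API): emitting a cpumask with
 * the '%*pb' (hex) or '%*pbl' (list) printf extensions, assuming pr_info()
 * from <linux/printk.h> is available:
 *
 *	pr_info("online: %*pbl\n", cpumask_pr_args(cpu_online_mask));
 *	pr_info("online: %*pb\n", cpumask_pr_args(cpu_online_mask));
 */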

#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
#define nr_cpu_ids ((unsigned int)NR_CPUS)
#else
extern unsigned int nr_cpu_ids;
#endif

static __always_inline void set_nr_cpu_ids(unsigned int nr)
{
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
	WARN_ON(nr != nr_cpu_ids);
#else
	nr_cpu_ids = nr;
#endif
}

/*
 * We have several different "preferred sizes" for the cpumask
 * operations, depending on operation.
 *
 * For example, the bitmap scanning and operating operations have
 * optimized routines that work for the single-word case, but only when
 * the size is constant. So if NR_CPUS fits in one single word, we are
 * better off using that small constant, in order to trigger the
 * optimized bit finding. That is 'small_cpumask_bits'.
 *
 * The clearing and copying operations will similarly perform better
 * with a constant size, but we limit that size arbitrarily to four
 * words. We call this 'large_cpumask_bits'.
 *
 * Finally, some operations just want the exact limit, either because
 * they set bits or just don't have any faster fixed-sized versions. We
 * call this just 'nr_cpumask_bits'.
 *
 * Note that these optional constants are always guaranteed to be at
 * least as big as 'nr_cpu_ids' itself is, and all our cpumask
 * allocations are at least that size (see cpumask_size()). The
 * optimization comes from being able to potentially use a compile-time
 * constant instead of a run-time generated exact number of CPUs.
 */
#if NR_CPUS <= BITS_PER_LONG
  #define small_cpumask_bits ((unsigned int)NR_CPUS)
  #define large_cpumask_bits ((unsigned int)NR_CPUS)
#elif NR_CPUS <= 4*BITS_PER_LONG
  #define small_cpumask_bits nr_cpu_ids
  #define large_cpumask_bits ((unsigned int)NR_CPUS)
#else
  #define small_cpumask_bits nr_cpu_ids
  #define large_cpumask_bits nr_cpu_ids
#endif
#define nr_cpumask_bits nr_cpu_ids

/*
 * The following particular system cpumasks and operations manage
 * possible, present, active and online cpus.
 *
 *     cpu_possible_mask - has bit 'cpu' set iff cpu is populatable
 *     cpu_present_mask  - has bit 'cpu' set iff cpu is populated
 *     cpu_enabled_mask  - has bit 'cpu' set iff cpu can be brought online
 *     cpu_online_mask   - has bit 'cpu' set iff cpu available to scheduler
 *     cpu_active_mask   - has bit 'cpu' set iff cpu available to migration
 *
 *  If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
 *
 *  The cpu_possible_mask is fixed at boot time, as the set of CPU IDs
 *  that might ever be plugged in at any time during the life of that
 *  system boot.  The cpu_present_mask is dynamic(*),
 *  representing which CPUs are currently plugged in.  And
 *  cpu_online_mask is the dynamic subset of cpu_present_mask,
 *  indicating those CPUs available for scheduling.
 *
 *  If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
 *  depending on what ACPI reports as currently plugged in, otherwise
 *  cpu_present_mask is just a copy of cpu_possible_mask.
 *
 *  (*) Well, cpu_present_mask is dynamic in the hotplug case.  If not
 *      hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
 *
 * Subtleties:
 * 1) UP ARCHes (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
 *    assumption that their single CPU is online.  The UP
 *    cpu_{online,possible,present}_masks are placebos.  Changing them
 *    will have no useful effect on the following num_*_cpus()
 *    and cpu_*() macros in the UP case.  This ugliness is a UP
 *    optimization - don't waste any instructions or memory references
 *    asking if you're online or how many CPUs there are if there is
 *    only one CPU.
 */

extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_enabled_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
extern struct cpumask __cpu_dying_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask   ((const struct cpumask *)&__cpu_online_mask)
#define cpu_enabled_mask  ((const struct cpumask *)&__cpu_enabled_mask)
#define cpu_present_mask  ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask   ((const struct cpumask *)&__cpu_active_mask)
#define cpu_dying_mask    ((const struct cpumask *)&__cpu_dying_mask)

extern atomic_t __num_online_cpus;

extern cpumask_t cpus_booted_once_mask;

static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	WARN_ON_ONCE(cpu >= bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
}

/* verify cpu argument to cpumask_* operators */
static __always_inline unsigned int cpumask_check(unsigned int cpu)
{
	cpu_max_bits_warn(cpu, small_cpumask_bits);
	return cpu;
}

/**
 * cpumask_first - get the first cpu in a cpumask
 * @srcp: the cpumask pointer
 *
 * Return: >= nr_cpu_ids if no cpus set.
 */
static __always_inline unsigned int cpumask_first(const struct cpumask *srcp)
{
	return find_first_bit(cpumask_bits(srcp), small_cpumask_bits);
}

/**
 * cpumask_first_zero - get the first unset cpu in a cpumask
 * @srcp: the cpumask pointer
 *
 * Return: >= nr_cpu_ids if all cpus are set.
 */
static __always_inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
	return find_first_zero_bit(cpumask_bits(srcp), small_cpumask_bits);
}

/**
 * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
 * @srcp1: the first input
 * @srcp2: the second input
 *
 * Return: >= nr_cpu_ids if no cpus set in both.  See also cpumask_next_and().
 */
static __always_inline
unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
	return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}

/**
 * cpumask_first_and_and - return the first cpu from *srcp1 & *srcp2 & *srcp3
 * @srcp1: the first input
 * @srcp2: the second input
 * @srcp3: the third input
 *
 * Return: >= nr_cpu_ids if no cpus set in all.
 */
static __always_inline
unsigned int cpumask_first_and_and(const struct cpumask *srcp1,
				   const struct cpumask *srcp2,
				   const struct cpumask *srcp3)
{
	return find_first_and_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
				      cpumask_bits(srcp3), small_cpumask_bits);
}

/**
 * cpumask_last - get the last CPU in a cpumask
 * @srcp: the cpumask pointer
 *
 * Return: >= nr_cpu_ids if no CPUs set.
 */
static __always_inline unsigned int cpumask_last(const struct cpumask *srcp)
{
	return find_last_bit(cpumask_bits(srcp), small_cpumask_bits);
}

/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (i.e. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Return: >= nr_cpu_ids if no further cpus set.
 */
static __always_inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_bit(cpumask_bits(srcp), small_cpumask_bits, n + 1);
}
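
/*
 * Example (illustrative): an open-coded scan equivalent to for_each_cpu(),
 * shown only to clarify the cpumask_first()/cpumask_next() contract; real
 * code should prefer the iterator macros below:
 *
 *	unsigned int cpu;
 *
 *	for (cpu = cpumask_first(mask); cpu < nr_cpu_ids;
 *	     cpu = cpumask_next(cpu, mask))
 *		do_something(cpu);	// do_something() is hypothetical
 */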

/**
 * cpumask_next_zero - get the next unset cpu in a cpumask
 * @n: the cpu prior to the place to search (i.e. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Return: >= nr_cpu_ids if no further cpus unset.
 */
static __always_inline
unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_zero_bit(cpumask_bits(srcp), small_cpumask_bits, n + 1);
}

#if NR_CPUS == 1
/* Uniprocessor: there is only one valid CPU */
static __always_inline
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	return 0;
}

static __always_inline
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
					const struct cpumask *src2p)
{
	return cpumask_first_and(src1p, src2p);
}

static __always_inline
unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
	return cpumask_first(srcp);
}
#else
unsigned int cpumask_local_spread(unsigned int i, int node);
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p);
unsigned int cpumask_any_distribute(const struct cpumask *srcp);
#endif /* NR_CPUS */

/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (i.e. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Return: >= nr_cpu_ids if no further cpus set in both.
 */
static __always_inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
			      const struct cpumask *src2p)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
		small_cpumask_bits, n + 1);
}

/**
 * for_each_cpu - iterate over every cpu in a mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu(cpu, mask)				\
	for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits)
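
/*
 * Example (illustrative): counting the CPUs of @mask that are currently
 * online; cpumask_weight_and() below does the same job without a loop:
 *
 *	unsigned int cpu, cnt = 0;
 *
 *	for_each_cpu(cpu, mask)
 *		if (cpu_online(cpu))
 *			cnt++;
 */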

#if NR_CPUS == 1
static __always_inline
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	cpumask_check(start);
	if (n != -1)
		cpumask_check(n);

	/*
	 * Return the first available CPU when wrapping, or when starting before cpu0,
	 * since there is only one valid option.
	 */
	if (wrap && n >= 0)
		return nr_cpumask_bits;

	return cpumask_first(mask);
}
#else
unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
#endif

/**
 * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 * @start: the start location
 *
 * The implementation does not assume any bit in @mask is set (including @start).
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_wrap(cpu, mask, start)				\
	for_each_set_bit_wrap(cpu, cpumask_bits(mask), small_cpumask_bits, start)
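
/*
 * Example (illustrative): visiting every online CPU exactly once, but
 * starting at the current CPU so repeated callers don't all hammer CPU 0
 * first.  Assumes a context where smp_processor_id() is stable:
 *
 *	unsigned int cpu;
 *
 *	for_each_cpu_wrap(cpu, cpu_online_mask, smp_processor_id())
 *		queue_some_work(cpu);	// queue_some_work() is hypothetical
 */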

/**
 * for_each_cpu_and - iterate over every cpu in both masks
 * @cpu: the (optionally unsigned) integer iterator
 * @mask1: the first cpumask pointer
 * @mask2: the second cpumask pointer
 *
 * This saves a temporary CPU mask in many places.  It is equivalent to:
 *	struct cpumask tmp;
 *	cpumask_and(&tmp, &mask1, &mask2);
 *	for_each_cpu(cpu, &tmp)
 *		...
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_and(cpu, mask1, mask2)				\
	for_each_and_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)

/**
 * for_each_cpu_andnot - iterate over every cpu present in one mask, excluding
 *			 those present in another.
 * @cpu: the (optionally unsigned) integer iterator
 * @mask1: the first cpumask pointer
 * @mask2: the second cpumask pointer
 *
 * This saves a temporary CPU mask in many places.  It is equivalent to:
 *	struct cpumask tmp;
 *	cpumask_andnot(&tmp, &mask1, &mask2);
 *	for_each_cpu(cpu, &tmp)
 *		...
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_andnot(cpu, mask1, mask2)				\
	for_each_andnot_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)

/**
 * for_each_cpu_or - iterate over every cpu present in either mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask1: the first cpumask pointer
 * @mask2: the second cpumask pointer
 *
 * This saves a temporary CPU mask in many places.  It is equivalent to:
 *	struct cpumask tmp;
 *	cpumask_or(&tmp, &mask1, &mask2);
 *	for_each_cpu(cpu, &tmp)
 *		...
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_or(cpu, mask1, mask2)				\
	for_each_or_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)

/**
 * for_each_cpu_from - iterate over CPUs present in @mask, from @cpu to the end of @mask.
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_from(cpu, mask)				\
	for_each_set_bit_from(cpu, cpumask_bits(mask), small_cpumask_bits)

/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Return: >= nr_cpu_ids if no cpus set.
 */
static __always_inline
unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}
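
/*
 * Example (illustrative): picking a peer CPU to offload work to, falling
 * back to the local CPU when it is the only one in the mask:
 *
 *	unsigned int target = cpumask_any_but(cpu_online_mask,
 *					      smp_processor_id());
 *	if (target >= nr_cpu_ids)
 *		target = smp_processor_id();	// no other online CPU
 */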

/**
 * cpumask_any_and_but - pick a "random" cpu from *mask1 & *mask2, but not this one.
 * @mask1: the first input cpumask
 * @mask2: the second input cpumask
 * @cpu: the cpu to ignore
 *
 * Return: >= nr_cpu_ids if no cpus set.
 */
static __always_inline
unsigned int cpumask_any_and_but(const struct cpumask *mask1,
				 const struct cpumask *mask2,
				 unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	i = cpumask_first_and(mask1, mask2);
	if (i != cpu)
		return i;

	return cpumask_next_and(cpu, mask1, mask2);
}

/**
 * cpumask_nth - get the Nth cpu in a cpumask
 * @srcp: the cpumask pointer
 * @cpu: the Nth cpu to find, starting from 0
 *
 * Return: >= nr_cpu_ids if such cpu doesn't exist.
 */
static __always_inline
unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
{
	return find_nth_bit(cpumask_bits(srcp), small_cpumask_bits, cpumask_check(cpu));
}

/**
 * cpumask_nth_and - get the Nth cpu in 2 cpumasks
 * @srcp1: the cpumask pointer
 * @srcp2: the cpumask pointer
 * @cpu: the Nth cpu to find, starting from 0
 *
 * Return: >= nr_cpu_ids if such cpu doesn't exist.
 */
static __always_inline
unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
							const struct cpumask *srcp2)
{
	return find_nth_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
				small_cpumask_bits, cpumask_check(cpu));
}

/**
 * cpumask_nth_andnot - get the Nth cpu set in 1st cpumask, and clear in 2nd.
 * @srcp1: the cpumask pointer
 * @srcp2: the cpumask pointer
 * @cpu: the Nth cpu to find, starting from 0
 *
 * Return: >= nr_cpu_ids if such cpu doesn't exist.
 */
static __always_inline
unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
							const struct cpumask *srcp2)
{
	return find_nth_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
				small_cpumask_bits, cpumask_check(cpu));
}

/**
 * cpumask_nth_and_andnot - get the Nth cpu set in 1st and 2nd cpumask, and clear in 3rd.
 * @srcp1: the cpumask pointer
 * @srcp2: the cpumask pointer
 * @srcp3: the cpumask pointer
 * @cpu: the Nth cpu to find, starting from 0
 *
 * Return: >= nr_cpu_ids if such cpu doesn't exist.
 */
static __always_inline
unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp1,
							const struct cpumask *srcp2,
							const struct cpumask *srcp3)
{
	return find_nth_and_andnot_bit(cpumask_bits(srcp1),
					cpumask_bits(srcp2),
					cpumask_bits(srcp3),
					small_cpumask_bits, cpumask_check(cpu));
}

#define CPU_BITS_NONE						\
{								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL			\
}

#define CPU_BITS_CPU0						\
{								\
	[0] =  1UL						\
}

/**
 * cpumask_set_cpu - set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static __always_inline
void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

static __always_inline
void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/**
 * cpumask_clear_cpu - clear a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/**
 * cpumask_assign_cpu - assign a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 * @value: the value to assign
 */
static __always_inline void cpumask_assign_cpu(int cpu, struct cpumask *dstp, bool value)
{
	assign_bit(cpumask_check(cpu), cpumask_bits(dstp), value);
}

static __always_inline void __cpumask_assign_cpu(int cpu, struct cpumask *dstp, bool value)
{
	__assign_bit(cpumask_check(cpu), cpumask_bits(dstp), value);
}
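
/*
 * Example (illustrative): the double-underscore variants are non-atomic
 * and are the right choice while a mask is still private to the caller,
 * e.g. while building one up on the stack (fine when NR_CPUS is small;
 * larger configs should prefer cpumask_var_t, see below):
 *
 *	struct cpumask tmp;
 *
 *	cpumask_clear(&tmp);
 *	__cpumask_set_cpu(0, &tmp);	// no atomic RMW needed here
 *	__cpumask_set_cpu(2, &tmp);
 */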

/**
 * cpumask_test_cpu - test for a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Return: true if @cpu is set in @cpumask, else returns false
 */
static __always_inline
bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
	return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}

/**
 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * test_and_set_bit wrapper for cpumasks.
 *
 * Return: true if @cpu is set in old bitmap of @cpumask, else returns false
 */
static __always_inline
bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
	return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}

/**
 * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * test_and_clear_bit wrapper for cpumasks.
 *
 * Return: true if @cpu is set in old bitmap of @cpumask, else returns false
 */
static __always_inline
bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
	return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}

/**
 * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
 * @dstp: the cpumask pointer
 */
static __always_inline void cpumask_setall(struct cpumask *dstp)
{
	if (small_const_nbits(small_cpumask_bits)) {
		cpumask_bits(dstp)[0] = BITMAP_LAST_WORD_MASK(nr_cpumask_bits);
		return;
	}
	bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
 * @dstp: the cpumask pointer
 */
static __always_inline void cpumask_clear(struct cpumask *dstp)
{
	bitmap_zero(cpumask_bits(dstp), large_cpumask_bits);
}

/**
 * cpumask_and - *dstp = *src1p & *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 *
 * Return: false if *@dstp is empty, else returns true
 */
static __always_inline
bool cpumask_and(struct cpumask *dstp, const struct cpumask *src1p,
		 const struct cpumask *src2p)
{
	return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
				       cpumask_bits(src2p), small_cpumask_bits);
}

/**
 * cpumask_or - *dstp = *src1p | *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static __always_inline
void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
		const struct cpumask *src2p)
{
	bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
				      cpumask_bits(src2p), small_cpumask_bits);
}

/**
 * cpumask_xor - *dstp = *src1p ^ *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static __always_inline
void cpumask_xor(struct cpumask *dstp, const struct cpumask *src1p,
		 const struct cpumask *src2p)
{
	bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
				       cpumask_bits(src2p), small_cpumask_bits);
}

/**
 * cpumask_andnot - *dstp = *src1p & ~*src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 *
 * Return: false if *@dstp is empty, else returns true
 */
static __always_inline
bool cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p,
		    const struct cpumask *src2p)
{
	return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
					  cpumask_bits(src2p), small_cpumask_bits);
}
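
/*
 * Example (illustrative): computing the set of CPUs that are possible but
 * not currently online, e.g. candidates for onlining.  Assumes @offline
 * points at caller-provided storage:
 *
 *	if (cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask))
 *		pr_info("offline: %*pbl\n", cpumask_pr_args(offline));
 */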

/**
 * cpumask_equal - *src1p == *src2p
 * @src1p: the first input
 * @src2p: the second input
 *
 * Return: true if the cpumasks are equal, false if not
 */
static __always_inline
bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p)
{
	return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
						 small_cpumask_bits);
}

/**
 * cpumask_or_equal - *src1p | *src2p == *src3p
 * @src1p: the first input
 * @src2p: the second input
 * @src3p: the third input
 *
 * Return: true if first cpumask ORed with second cpumask == third cpumask,
 *	   otherwise false
 */
static __always_inline
bool cpumask_or_equal(const struct cpumask *src1p, const struct cpumask *src2p,
		      const struct cpumask *src3p)
{
	return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
			       cpumask_bits(src3p), small_cpumask_bits);
}

/**
 * cpumask_intersects - (*src1p & *src2p) != 0
 * @src1p: the first input
 * @src2p: the second input
 *
 * Return: true if first cpumask ANDed with second cpumask is non-empty,
 *	   otherwise false
 */
static __always_inline
bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p)
{
	return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
						      small_cpumask_bits);
}

/**
 * cpumask_subset - (*src1p & ~*src2p) == 0
 * @src1p: the first input
 * @src2p: the second input
 *
 * Return: true if *@src1p is a subset of *@src2p, else returns false
 */
static __always_inline
bool cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p)
{
	return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
						  small_cpumask_bits);
}

/**
 * cpumask_empty - *srcp == 0
 * @srcp: the cpumask to check for all cpus (< nr_cpu_ids) being clear.
 *
 * Return: true if srcp is empty (has no bits set), else false
 */
static __always_inline bool cpumask_empty(const struct cpumask *srcp)
{
	return bitmap_empty(cpumask_bits(srcp), small_cpumask_bits);
}

/**
 * cpumask_full - *srcp == 0xFFFFFFFF...
 * @srcp: the cpumask to check for all cpus (< nr_cpu_ids) being set.
 *
 * Return: true if srcp is full (has all bits set), else false
 */
static __always_inline bool cpumask_full(const struct cpumask *srcp)
{
	return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_weight - Count of bits in *srcp
 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
 *
 * Return: count of bits set in *srcp
 */
static __always_inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
	return bitmap_weight(cpumask_bits(srcp), small_cpumask_bits);
}

/**
 * cpumask_weight_and - Count of bits in (*srcp1 & *srcp2)
 * @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
 * @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
 *
 * Return: count of bits set in both *srcp1 and *srcp2
 */
static __always_inline
unsigned int cpumask_weight_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
	return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}

/**
 * cpumask_weight_andnot - Count of bits in (*srcp1 & ~*srcp2)
 * @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
 * @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
 *
 * Return: count of bits set in *srcp1 and clear in *srcp2
 */
static __always_inline
unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
				   const struct cpumask *srcp2)
{
	return bitmap_weight_andnot(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}

/**
 * cpumask_shift_right - *dstp = *srcp >> n
 * @dstp: the cpumask result
 * @srcp: the input to shift
 * @n: the number of bits to shift by
 */
static __always_inline
void cpumask_shift_right(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
	bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
					       small_cpumask_bits);
}

/**
 * cpumask_shift_left - *dstp = *srcp << n
 * @dstp: the cpumask result
 * @srcp: the input to shift
 * @n: the number of bits to shift by
 */
static __always_inline
void cpumask_shift_left(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
	bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
					      nr_cpumask_bits);
}

/**
 * cpumask_copy - *dstp = *srcp
 * @dstp: the result
 * @srcp: the input cpumask
 */
static __always_inline
void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp)
{
	bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), large_cpumask_bits);
}

/**
 * cpumask_any - pick a "random" cpu from *srcp
 * @srcp: the input cpumask
 *
 * Return: >= nr_cpu_ids if no cpus set.
 */
#define cpumask_any(srcp) cpumask_first(srcp)

/**
 * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
 * @mask1: the first input cpumask
 * @mask2: the second input cpumask
 *
 * Return: >= nr_cpu_ids if no cpus set.
 */
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))

/**
 * cpumask_of - the cpumask containing just a given cpu
 * @cpu: the cpu (< nr_cpu_ids)
 */
#define cpumask_of(cpu) (get_cpu_mask(cpu))

/**
 * cpumask_parse_user - extract a cpumask from a user string
 * @buf: the buffer to extract from
 * @len: the length of the buffer
 * @dstp: the cpumask to set.
 *
 * Return: -errno, or 0 for success.
 */
static __always_inline
int cpumask_parse_user(const char __user *buf, int len, struct cpumask *dstp)
{
	return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_parselist_user - extract a cpumask from a user string
 * @buf: the buffer to extract from
 * @len: the length of the buffer
 * @dstp: the cpumask to set.
 *
 * Return: -errno, or 0 for success.
 */
static __always_inline
int cpumask_parselist_user(const char __user *buf, int len, struct cpumask *dstp)
{
	return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
				     nr_cpumask_bits);
}

/**
 * cpumask_parse - extract a cpumask from a string
 * @buf: the buffer to extract from
 * @dstp: the cpumask to set.
 *
 * Return: -errno, or 0 for success.
 */
static __always_inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
	return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpulist_parse - extract a cpumask from a user string of ranges
 * @buf: the buffer to extract from
 * @dstp: the cpumask to set.
 *
 * Return: -errno, or 0 for success.
 */
static __always_inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
	return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
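
/*
 * Example (illustrative): parsing a list-format string such as "0-3,8"
 * into a caller-provided mask, e.g. from a module parameter or a sysfs
 * store handler:
 *
 *	int err = cpulist_parse("0-3,8", dstp);
 *	if (err)
 *		return err;	// negative errno, e.g. -EINVAL on bad input
 */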

/**
 * cpumask_size - calculate size to allocate for a 'struct cpumask' in bytes
 *
 * Return: size to allocate for a &struct cpumask in bytes
 */
static __always_inline unsigned int cpumask_size(void)
{
	return bitmap_size(large_cpumask_bits);
}
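
/*
 * Example (illustrative, assumes <linux/slab.h>): allocating a bare
 * struct cpumask directly; most code should prefer the cpumask_var_t
 * helpers below, which degrade to on-stack storage when
 * CONFIG_CPUMASK_OFFSTACK=n:
 *
 *	struct cpumask *m = kzalloc(cpumask_size(), GFP_KERNEL);
 *
 *	if (!m)
 *		return -ENOMEM;
 *	...
 *	kfree(m);
 */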

#ifdef CONFIG_CPUMASK_OFFSTACK

#define this_cpu_cpumask_var_ptr(x)	this_cpu_read(x)
#define __cpumask_var_read_mostly	__read_mostly

bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);

static __always_inline
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}

/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 *
 * Return: %true if allocation succeeded, %false if not
 */
static __always_inline
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}

static __always_inline
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}

void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);

static __always_inline bool cpumask_available(cpumask_var_t mask)
{
	return mask != NULL;
}

#else

#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
#define __cpumask_var_read_mostly

static __always_inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return true;
}

static __always_inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					  int node)
{
	return true;
}

static __always_inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	cpumask_clear(*mask);
	return true;
}

static __always_inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					  int node)
{
	cpumask_clear(*mask);
	return true;
}

static __always_inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}

static __always_inline void free_cpumask_var(cpumask_var_t mask)
{
}

static __always_inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}

static __always_inline bool cpumask_available(cpumask_var_t mask)
{
	return true;
}
#endif /* CONFIG_CPUMASK_OFFSTACK */

DEFINE_FREE(free_cpumask_var, struct cpumask *, if (_T) free_cpumask_var(_T));
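
/*
 * Example (illustrative): the usual cpumask_var_t lifecycle, which works
 * identically whether the mask lives on the stack or was kmalloc'ed:
 *
 *	cpumask_var_t mask;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_and(mask, cpu_online_mask, cpu_active_mask);
 *	...
 *	free_cpumask_var(mask);
 */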

/* It's common to want to use cpu_all_mask in struct member initializers,
 * so it has to refer to an address rather than a pointer. */
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
#define cpu_all_mask to_cpumask(cpu_all_bits)

/* First bits of cpu_bit_bitmap are in fact unset. */
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])

#if NR_CPUS == 1
/* Uniprocessor: the possible/online/present masks are always "1" */
#define for_each_possible_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
#define for_each_online_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
#define for_each_present_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
#else
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu)   for_each_cpu((cpu), cpu_online_mask)
#define for_each_enabled_cpu(cpu)  for_each_cpu((cpu), cpu_enabled_mask)
#define for_each_present_cpu(cpu)  for_each_cpu((cpu), cpu_present_mask)
#endif
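
/*
 * Example (illustrative): summing a per-CPU counter over every possible
 * CPU, the usual pattern for statistics that must survive CPUs going
 * offline.  Assumes a DEFINE_PER_CPU(u64, hits) from <linux/percpu.h>:
 *
 *	u64 total = 0;
 *	unsigned int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		total += per_cpu(hits, cpu);
 */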

/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);

#define assign_cpu(cpu, mask, val)	\
	assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))

#define set_cpu_possible(cpu, possible)	assign_cpu((cpu), &__cpu_possible_mask, (possible))
#define set_cpu_enabled(cpu, enabled)	assign_cpu((cpu), &__cpu_enabled_mask, (enabled))
#define set_cpu_present(cpu, present)	assign_cpu((cpu), &__cpu_present_mask, (present))
#define set_cpu_active(cpu, active)	assign_cpu((cpu), &__cpu_active_mask, (active))
#define set_cpu_dying(cpu, dying)	assign_cpu((cpu), &__cpu_dying_mask, (dying))

void set_cpu_online(unsigned int cpu, bool online);

/**
 * to_cpumask - convert a NR_CPUS bitmap to a struct cpumask *
 * @bitmap: the bitmap
 *
 * There are a few places where cpumask_var_t isn't appropriate and
 * static cpumasks must be used (eg. very early boot), yet we don't
 * expose the definition of 'struct cpumask'.
 *
 * This does the conversion, and can be used as a constant initializer.
 */
#define to_cpumask(bitmap)						\
	((struct cpumask *)(1 ? (bitmap)				\
			    : (void *)sizeof(__check_is_bitmap(bitmap))))

static __always_inline int __check_is_bitmap(const unsigned long *bitmap)
{
	return 1;
}

/*
 * Special-case data structure for "single bit set only" constant CPU masks.
 *
 * We pre-generate all the 64 (or 32) possible bit positions, with enough
 * padding to the left and the right, and return the constant pointer
 * appropriately offset.
 */
extern const unsigned long
	cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];

static __always_inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}

#if NR_CPUS > 1
/**
 * num_online_cpus() - Read the number of online CPUs
 *
 * Despite the fact that __num_online_cpus is of type atomic_t, this
 * interface gives only a momentary snapshot and is not protected against
 * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
 * region.
 *
 * Return: momentary snapshot of the number of online CPUs
 */
static __always_inline unsigned int num_online_cpus(void)
{
	return raw_atomic_read(&__num_online_cpus);
}
#define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
#define num_enabled_cpus()	cpumask_weight(cpu_enabled_mask)
#define num_present_cpus()	cpumask_weight(cpu_present_mask)
#define num_active_cpus()	cpumask_weight(cpu_active_mask)

static __always_inline bool cpu_online(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_online_mask);
}

static __always_inline bool cpu_enabled(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_enabled_mask);
}

static __always_inline bool cpu_possible(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_possible_mask);
}

static __always_inline bool cpu_present(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_present_mask);
}

static __always_inline bool cpu_active(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_active_mask);
}

static __always_inline bool cpu_dying(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_dying_mask);
}

#else

#define num_online_cpus()	1U
#define num_possible_cpus()	1U
#define num_enabled_cpus()	1U
#define num_present_cpus()	1U
#define num_active_cpus()	1U

static __always_inline bool cpu_online(unsigned int cpu)
{
	return cpu == 0;
}

static __always_inline bool cpu_possible(unsigned int cpu)
{
	return cpu == 0;
}

static __always_inline bool cpu_enabled(unsigned int cpu)
{
	return cpu == 0;
}

static __always_inline bool cpu_present(unsigned int cpu)
{
	return cpu == 0;
}

static __always_inline bool cpu_active(unsigned int cpu)
{
	return cpu == 0;
}

static __always_inline bool cpu_dying(unsigned int cpu)
{
	return false;
}

#endif /* NR_CPUS > 1 */

#define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))

#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL						\
{								\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
}

#else /* NR_CPUS > BITS_PER_LONG */

#define CPU_BITS_ALL						\
{								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,		\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
}
#endif /* NR_CPUS > BITS_PER_LONG */

/**
 * cpumap_print_to_pagebuf  - copies the cpumask into the buffer either
 *	as comma-separated list of cpus or hex values of cpumask
 * @list: true to format as a comma-separated list, false as hex values
 * @mask: the cpumask to copy
 * @buf: the buffer to copy into
 *
 * Return: the length of the (null-terminated) @buf string, zero if
 * nothing is copied.
 */
static __always_inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
	return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
				      nr_cpu_ids);
}
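
/*
 * Example (illustrative): a typical sysfs show() method exporting a mask;
 * 'my_mask' is a hypothetical driver-owned cpumask:
 *
 *	static ssize_t cpus_show(struct device *dev,
 *				 struct device_attribute *attr, char *buf)
 *	{
 *		return cpumap_print_to_pagebuf(true, buf, my_mask);
 *	}
 */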

/**
 * cpumap_print_bitmask_to_buf  - copies the cpumask into the buffer as
 *	hex values of cpumask
 *
 * @buf: the buffer to copy into
 * @mask: the cpumask to copy
 * @off: offset into the formatted string from which we copy into @buf
 * @count: the maximum number of bytes to print
 *
 * The function prints the cpumask into the buffer as hex values of
 * the cpumask; typically used by a bin_attribute to export the cpumask
 * bitmask ABI.
 *
 * Return: the number of bytes copied, excluding the terminating '\0'.
 */
static __always_inline
ssize_t cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
				    loff_t off, size_t count)
{
	return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
				   nr_cpu_ids, off, count) - 1;
}

/**
 * cpumap_print_list_to_buf  - copies the cpumask into the buffer as
 *	comma-separated list of cpus
 * @buf: the buffer to copy into
 * @mask: the cpumask to copy
 * @off: offset into the formatted string from which we copy into @buf
 * @count: the maximum number of bytes to print
 *
 * Everything is the same as with cpumap_print_bitmask_to_buf() above,
 * except the print format.
 *
 * Return: the number of bytes copied, excluding the terminating '\0'.
 */
static __always_inline
ssize_t cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
				 loff_t off, size_t count)
{
	return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
				   nr_cpu_ids, off, count) - 1;
}

#if NR_CPUS <= BITS_PER_LONG
#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
} }
#else
#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
} }
#endif /* NR_CPUS > BITS_PER_LONG */

#define CPU_MASK_NONE							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] =  0UL				\
} }

#define CPU_MASK_CPU0							\
(cpumask_t) { {								\
	[0] =  1UL							\
} }

/*
 * Provide a valid theoretical max size for cpumap and cpulist sysfs files
 * to avoid breaking userspace which may allocate a buffer based on the size
 * reported by e.g. fstat.
 *
 * For cpumap, NR_CPUS * 9/32 - 1 should be an exact length.
 *
 * For cpulist 7 is (ceil(log10(NR_CPUS)) + 1) allowing for NR_CPUS to be up
 * to 2 orders of magnitude larger than 8192. And then we divide by 2 to
 * cover a worst-case of every other cpu being on one of two nodes for a
 * very large NR_CPUS.
 *
 *  Use PAGE_SIZE as a minimum for smaller configurations while avoiding
 *  unsigned comparison to -1.
 */
#define CPUMAP_FILE_MAX_BYTES  (((NR_CPUS * 9)/32 > PAGE_SIZE) \
					? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
#define CPULIST_FILE_MAX_BYTES  (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)

#endif /* __LINUX_CPUMASK_H */