// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Return: >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	unsigned int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
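
/*
 * For reference, the for_each_cpu_wrap() iterator in <linux/cpumask.h> is
 * built on this helper, roughly as follows (a sketch; the exact macro may
 * differ between kernel versions):
 *
 *	#define for_each_cpu_wrap(cpu, mask, start)				\
 *		for ((cpu) = cpumask_next_wrap((start) - 1, (mask),		\
 *					       (start), false);			\
 *		     (cpu) < nr_cpumask_bits;					\
 *		     (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
 *
 * The first call passes wrap == false because @start itself may be set in
 * @mask and must still be visited; later calls pass wrap == true so the
 * iteration terminates once it crosses @start again.
 */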

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * Return: TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);
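
/*
 * Typical call pattern (a sketch): the allocation must be paired with
 * free_cpumask_var(), and the mask is uninitialized on success, so callers
 * either clear it first or use zalloc_cpumask_var_node() instead:
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var_node(&mask, GFP_KERNEL, nid))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	...
 *	free_cpumask_var(mask);
 */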

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free(mask, cpumask_size());
}
#endif

/**
 * cpumask_local_spread - select the i'th cpu based on NUMA distances
 * @i: index number
 * @node: local numa_node
 *
 * Return: online CPU according to a numa aware policy; local cpus are returned
 * first, followed by non-local ones, then it wraps around.
 *
 * For those who want to enumerate all CPUs based on their NUMA distances,
 * i.e. call this function in a loop, like:
 *
 * for (i = 0; i < num_online_cpus(); i++) {
 *	cpu = cpumask_local_spread(i, node);
 *	do_something(cpu);
 * }
 *
 * There's a better alternative based on for_each()-like iterators:
 *
 *	for_each_numa_hop_mask(mask, node) {
 *		for_each_cpu_andnot(cpu, mask, prev)
 *			do_something(cpu);
 *		prev = mask;
 *	}
 *
 * Although more verbose, the iterator-based approach is simpler and cheaper
 * than the loop above: its complexity is
 * O(sched_domains_numa_levels * nr_cpu_ids), while cpumask_local_spread(),
 * when called for each cpu, is
 * O(sched_domains_numa_levels * nr_cpu_ids * log(nr_cpu_ids)).
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	unsigned int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	cpu = sched_numa_find_nth_cpu(cpu_online_mask, i, node);

	WARN_ON(cpu >= nr_cpu_ids);
	return cpu;
}
EXPORT_SYMBOL(cpumask_local_spread);
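
/*
 * Example (a sketch): a driver spreading its queue interrupts across CPUs
 * in NUMA distance order, nearest to its device's node first.
 * set_queue_affinity() is illustrative, not a real kernel API:
 *
 *	for (q = 0; q < nr_queues; q++) {
 *		unsigned int cpu = cpumask_local_spread(q, dev_to_node(dev));
 *
 *		set_queue_affinity(q, cpu);
 *	}
 */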

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - Return an arbitrary cpu within src1p & src2p.
 * @src1p: first &cpumask for intersection
 * @src2p: second &cpumask for intersection
 *
 * Iterated calls using the same @src1p and @src2p will be distributed within
 * their intersection.
 *
 * Return: >= nr_cpu_ids if the intersection is empty.
 */
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
					nr_cpumask_bits, prev + 1);
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
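
/*
 * Example (a sketch): round-robin selection of a CPU that is both allowed
 * for task @p and currently online; repeated calls spread the choice over
 * the intersection instead of always picking its first set bit:
 *
 *	cpu = cpumask_any_and_distribute(p->cpus_ptr, cpu_online_mask);
 *	if (cpu >= nr_cpu_ids)
 *		return -EAGAIN;
 */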

/**
 * cpumask_any_distribute - Return an arbitrary cpu from srcp
 * @srcp: &cpumask for selection
 *
 * Return: >= nr_cpu_ids if @srcp is empty.
 */
unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);
	next = find_next_bit_wrap(cpumask_bits(srcp), nr_cpumask_bits, prev + 1);
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);