// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
 */

#include <linux/completion.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define	PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */

struct padata_work {
	struct work_struct	pw_work;
	struct list_head	pw_list;  /* padata_free_works linkage */
	void			*pw_data;
};

static DEFINE_SPINLOCK(padata_works_lock);
static struct padata_work *padata_works;
static LIST_HEAD(padata_free_works);

struct padata_mt_job_state {
	spinlock_t		lock;
	struct completion	completion;
	struct padata_mt_job	*job;
	int			nworks;
	int			nworks_fini;
	unsigned long		chunk_size;
};

static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence number to a cpu by taking seq_nr modulo the
	 * number of cpus in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
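
/*
 * Worked example (illustrative values, not from this file): with pcpu
 * cpumask {0, 2, 3}, cpumask_weight() is 3, so seq_nr 5 gives cpu_index
 * 5 % 3 = 2, which padata_index_to_cpu() maps to the third set bit, CPU 3.
 */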

static struct padata_work *padata_work_alloc(void)
{
	struct padata_work *pw;

	lockdep_assert_held(&padata_works_lock);

	if (list_empty(&padata_free_works))
		return NULL;	/* No more work items allowed to be queued. */

	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
	list_del(&pw->pw_list);
	return pw;
}

/*
 * This function is marked __ref because this function may be optimized in such
 * a way that it directly refers to work_fn's address, which causes modpost to
 * complain when work_fn is marked __init. This scenario was observed with clang
 * LTO, where padata_work_init() was optimized to refer directly to
 * padata_mt_helper() because the calls to padata_work_init() with other work_fn
 * values were eliminated or inlined.
 */
static void __ref padata_work_init(struct padata_work *pw, work_func_t work_fn,
				   void *data, int flags)
{
	if (flags & PADATA_WORK_ONSTACK)
		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
	else
		INIT_WORK(&pw->pw_work, work_fn);
	pw->pw_data = data;
}

static int __init padata_work_alloc_mt(int nworks, void *data,
				       struct list_head *head)
{
	int i;

	spin_lock_bh(&padata_works_lock);
	/* Start at 1 because the current task participates in the job. */
	for (i = 1; i < nworks; ++i) {
		struct padata_work *pw = padata_work_alloc();

		if (!pw)
			break;
		padata_work_init(pw, padata_mt_helper, data, 0);
		list_add(&pw->pw_list, head);
	}
	spin_unlock_bh(&padata_works_lock);

	return i;
}

static void padata_work_free(struct padata_work *pw)
{
	lockdep_assert_held(&padata_works_lock);
	list_add(&pw->pw_list, &padata_free_works);
}

static void __init padata_works_free(struct list_head *works)
{
	struct padata_work *cur, *next;

	if (list_empty(works))
		return;

	spin_lock_bh(&padata_works_lock);
	list_for_each_entry_safe(cur, next, works, pw_list) {
		list_del(&cur->pw_list);
		padata_work_free(cur);
	}
	spin_unlock_bh(&padata_works_lock);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_work *pw = container_of(parallel_work, struct padata_work,
					      pw_work);
	struct padata_priv *padata = pw->pw_data;

	local_bh_disable();
	padata->parallel(padata);
	spin_lock(&padata_works_lock);
	padata_work_free(pw);
	spin_unlock(&padata_works_lock);
	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on.  If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, err;
	struct parallel_data *pd;
	struct padata_work *pw;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (cpumask_empty(pd->cpumask.cbcpu))
			goto out;

		/* Select a fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	refcount_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	spin_lock(&padata_works_lock);
	padata->seq_nr = ++pd->seq_nr;
	pw = padata_work_alloc();
	spin_unlock(&padata_works_lock);

	if (!pw) {
		/* Maximum works limit exceeded, run in the current task. */
		padata->parallel(padata);
	}

	rcu_read_unlock_bh();

	if (pw) {
		padata_work_init(pw, padata_parallel_worker, padata, 0);
		queue_work(pinst->parallel_wq, &pw->pw_work);
	}

	return 0;
out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
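
/*
 * A minimal usage sketch (illustrative, not part of this file): a caller
 * embeds struct padata_priv in its own request and pairs padata_do_parallel()
 * with padata_do_serial().  The names my_request, my_parallel() and
 * my_serial() are hypothetical.
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						      struct my_request, padata);
 *		// ... do the parallel work on req ...
 *		padata_do_serial(padata);  // required for every object
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		// runs on *cb_cpu, in original submission order
 *	}
 *
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial   = my_serial;
 *	err = padata_do_parallel(ps, &req->padata, &cb_cpu);
 */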

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_list *reorder;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime.
	 *
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial.  Pairs with
	 * smp_mb in padata_do_serial.
	 */
	smp_mb();

	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	if (refcount_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;
	struct list_head *pos;

	spin_lock(&reorder->lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_prev(pos, &reorder->list) {
		cur = list_entry(pos, struct padata_priv, list);
		/* Compare by difference to handle integer wraparound. */
		if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
			break;
	}
	list_add(&padata->list, pos);
	spin_unlock(&reorder->lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}

static void __init padata_mt_helper(struct work_struct *w)
{
	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
	struct padata_mt_job_state *ps = pw->pw_data;
	struct padata_mt_job *job = ps->job;
	bool done;

	spin_lock(&ps->lock);

	while (job->size > 0) {
		unsigned long start, size, end;

		start = job->start;
		/* So end is chunk size aligned if enough work remains. */
		size = roundup(start + 1, ps->chunk_size) - start;
		size = min(size, job->size);
		end = start + size;

		job->start = end;
		job->size -= size;

		spin_unlock(&ps->lock);
		job->thread_fn(start, end, job->fn_arg);
		spin_lock(&ps->lock);
	}

	++ps->nworks_fini;
	done = (ps->nworks_fini == ps->nworks);
	spin_unlock(&ps->lock);

	if (done)
		complete(&ps->completion);
}

/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
	/* In case threads finish at different times. */
	static const unsigned long load_balance_factor = 4;
	struct padata_work my_work, *pw;
	struct padata_mt_job_state ps;
	LIST_HEAD(works);
	int nworks, nid;
	static atomic_t last_used_nid __initdata;

	if (job->size == 0)
		return;

	/* Ensure at least one thread when size < min_chunk. */
	nworks = max(job->size / max(job->min_chunk, job->align), 1ul);
	nworks = min(nworks, job->max_threads);

	if (nworks == 1) {
		/* Single thread, no coordination needed, cut to the chase. */
		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
		return;
	}

	spin_lock_init(&ps.lock);
	init_completion(&ps.completion);
	ps.job	       = job;
	ps.nworks      = padata_work_alloc_mt(nworks, &ps, &works);
	ps.nworks_fini = 0;

	/*
	 * Chunk size is the amount of work a helper does per call to the
	 * thread function.  Load balance large jobs between threads by
	 * increasing the number of chunks, guarantee at least the minimum
	 * chunk size from the caller, and honor the caller's alignment.
	 * Ensure chunk_size is at least 1 to prevent divide-by-0
	 * panic in padata_mt_helper().
	 */
	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
	ps.chunk_size = max(ps.chunk_size, 1ul);
	ps.chunk_size = roundup(ps.chunk_size, job->align);
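
	/*
	 * Worked example (illustrative values, not from a real caller): with
	 * job->size = 4096, ps.nworks = 8 and load_balance_factor = 4,
	 * chunk_size starts as 4096 / 32 = 128.  The 32 chunks outnumber the
	 * 8 threads, so helpers that finish early pick up slack from slower
	 * ones.
	 */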

	list_for_each_entry(pw, &works, pw_list)
		if (job->numa_aware) {
			int old_node = atomic_read(&last_used_nid);

			do {
				nid = next_node_in(old_node, node_states[N_CPU]);
			} while (!atomic_try_cmpxchg(&last_used_nid, &old_node, nid));
			queue_work_node(nid, system_unbound_wq, &pw->pw_work);
		} else {
			queue_work(system_unbound_wq, &pw->pw_work);
		}

	/* Use the current thread, which saves starting a workqueue worker. */
	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
	padata_mt_helper(&my_work.pw_work);

	/* Wait for all the helpers to finish. */
	wait_for_completion(&ps.completion);

	destroy_work_on_stack(&my_work.pw_work);
	padata_works_free(&works);
}
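
/*
 * A minimal __init usage sketch (illustrative, not part of this file);
 * init_range(), nr_items and the numbers are hypothetical.  The job
 * describes the range [start, start + size), which the helpers process in
 * disjoint chunks:
 *
 *	static void __init init_range(unsigned long start, unsigned long end,
 *				      void *arg)
 *	{
 *		// initialize every item in [start, end)
 *	}
 *
 *	struct padata_mt_job job = {
 *		.thread_fn   = init_range,
 *		.fn_arg      = NULL,
 *		.start       = 0,
 *		.size        = nr_items,
 *		.align       = 1,
 *		.min_chunk   = 1024,
 *		.max_threads = num_online_cpus(),
 *		.numa_aware  = false,
 *	};
 *
 *	padata_do_multithreaded(&job);
 */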

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		__padata_list_init(list);
	}
}

/* Allocate and initialize the internal cpumask-dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->reorder_list = alloc_percpu(struct padata_list);
	if (!pd->reorder_list)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_reorder_list;

	pd->ps = ps;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto err_free_squeue;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_pcpu;

	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

	padata_init_reorder_list(pd);
	padata_init_squeues(pd);
	pd->seq_nr = -1;
	refcount_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_pcpu:
	free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
	free_percpu(pd->squeue);
err_free_reorder_list:
	free_percpu(pd->reorder_list);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->reorder_list);
	free_percpu(pd->squeue);
	kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		if (refcount_dec_and_test(&ps->opd->refcnt))
			padata_free_pd(ps->opd);

	pinst->flags &= ~PADATA_RESET;

	return err;
}

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding
 *                to the serial and parallel cpumasks respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	cpus_read_lock();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	cpus_read_unlock();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
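
/*
 * A minimal usage sketch (illustrative, not part of this file): restrict
 * parallel workers to CPUs 0-3.  The mask contents are hypothetical.
 *
 *	cpumask_var_t mask;
 *	int cpu, err;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	for (cpu = 0; cpu <= 3; cpu++)
 *		cpumask_set_cpu(cpu, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */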

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		err = padata_replace(pinst);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		err = padata_replace(pinst);
	}

	return err;
}

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)		\
	static struct padata_sysfs_entry _name##_attr = \
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);
static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static const struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance
 * @name: used to identify the instance
 *
 * Return: new instance on success, NULL on error
 */
struct padata_instance *padata_alloc(const char *name)
{
	struct padata_instance *pinst;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	cpus_read_lock();

	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
					   WQ_CPU_INTENSIVE, 1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_serial_wq;
	}

	INIT_LIST_HEAD(&pinst->pslist);

	cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
	cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);

	if (padata_setup_cpumasks(pinst))
		goto err_free_masks;

	__padata_start(pinst);

	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpu_online_node);
	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
						    &pinst->cpu_dead_node);
#endif

	cpus_read_unlock();

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	cpus_read_unlock();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 *
 * Return: new shell on success, NULL on error
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	cpus_read_lock();
	pd = padata_alloc_pd(ps);
	cpus_read_unlock();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);
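
/*
 * A minimal lifecycle sketch (illustrative, not part of this file): one
 * instance can back several shells, and teardown reverses setup.  The
 * instance name "myjob" is hypothetical.
 *
 *	struct padata_instance *pinst = padata_alloc("myjob");
 *	struct padata_shell *ps;
 *
 *	if (!pinst)
 *		return -ENOMEM;
 *	ps = padata_alloc_shell(pinst);
 *	if (!ps) {
 *		padata_free(pinst);
 *		return -ENOMEM;
 *	}
 *
 *	// submit objects with padata_do_parallel(ps, ...), then later:
 *
 *	padata_free_shell(ps);
 *	padata_free(pinst);
 */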

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
	struct parallel_data *pd;

	if (!ps)
		return;

	mutex_lock(&ps->pinst->lock);
	list_del(&ps->list);
	pd = rcu_dereference_protected(ps->pd, 1);
	if (refcount_dec_and_test(&pd->refcnt))
		padata_free_pd(pd);
	mutex_unlock(&ps->pinst->lock);

	kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);

void __init padata_init(void)
{
	unsigned int i, possible_cpus;
#ifdef CONFIG_HOTPLUG_CPU
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		goto err;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0)
		goto remove_online_state;
#endif

	possible_cpus = num_possible_cpus();
	padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
				     GFP_KERNEL);
	if (!padata_works)
		goto remove_dead_state;

	for (i = 0; i < possible_cpus; ++i)
		list_add(&padata_works[i].pw_list, &padata_free_works);

	return;

remove_dead_state:
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
	cpuhp_remove_multi_state(hp_online);
err:
#endif
	pr_warn("padata: initialization failed\n");
}