// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include <asm/sections.h>

#include "base.h"
#include "trace.h"

struct devres_node {
	struct list_head		entry;
	dr_release_t			release;
	const char			*name;
	size_t				size;
};

struct devres {
	struct devres_node		node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_DMA_MINALIGN for data[] which will force the same
	 * alignment for struct devres when allocated by kmalloc().
	 */
	u8 __aligned(ARCH_DMA_MINALIGN) data[];
};

struct devres_group {
	struct devres_node		node[2];
	void				*id;
	int				color;
	/* -- 8 pointers */
};

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

static void devres_dbg(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n",
			op, node, node->name, node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define devres_dbg(dev, node, op)	do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */

static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	trace_devres_log(dev, op, node, node->name, node->size);
	devres_dbg(dev, node, op);
}

/*
 * Release functions for devres group.  These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group *node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

static bool check_dr_size(size_t size, size_t *tot_size)
{
	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres),
					size, tot_size)))
		return false;

	/* Actually allocate the full kmalloc bucket size. */
	*tot_size = kmalloc_size_roundup(*tot_size);

	return true;
}

static __always_inline struct devres *alloc_dr(dr_release_t release,
					       size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	if (!check_dr_size(size, &tot_size))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	/* No need to clear memory twice */
	if (!(gfp & __GFP_ZERO))
		memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

static void replace_dr(struct device *dev,
		       struct devres_node *old, struct devres_node *new)
{
	devres_log(dev, old, "REPLACE");
	BUG_ON(!list_empty(&new->entry));
	list_replace(&old->entry, &new->entry);
}

/**
 * __devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 * @name: Name of the resource
 *
 * Allocate devres of @size bytes.  The allocated area is zeroed, then
 * associated with @release.  The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			  const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);

/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * RETURNS:
 * 	void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			dr_match_t match, void *match_data,
			void (*fn)(struct device *, void *, void *),
			void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	list_for_each_entry_safe_reverse(node, tmp,
			&dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
EXPORT_SYMBOL_GPL(devres_free);

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev.  @res should have been allocated
 * using devres_alloc().  On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);
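
/*
 * Usage sketch (illustrative only, not part of this file): a driver would
 * typically pair devres_alloc() with devres_add().  The "foo" names and the
 * release body below are hypothetical.
 *
 *	static void foo_release(struct device *dev, void *res)
 *	{
 *		struct foo_ctx *ctx = res;
 *
 *		foo_hw_stop(ctx);
 *	}
 *
 *	ctx = devres_alloc(foo_release, sizeof(*ctx), GFP_KERNEL);
 *	if (!ctx)
 *		return -ENOMEM;
 *	foo_hw_start(ctx);
 *	devres_add(dev, ctx);
 */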

static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1.  If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void *devres_find(struct device *dev, dr_release_t release,
		  dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1.  If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void *devres_get(struct device *dev, void *new_res,
		 dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);

/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */
void *devres_remove(struct device *dev, dr_release_t release,
		    dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	if (dr) {
		list_del_init(&dr->node.entry);
		devres_log(dev, &dr->node, "REM");
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);

/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed.  The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);


/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	(*release)(dev, res);
	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_release);
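
/*
 * Illustrative sketch (hypothetical "foo" names): devres_release() is the
 * usual way to tear a managed resource down early, since it runs the release
 * callback before freeing, while devres_destroy() skips the callback and is
 * only correct when the caller has already undone the underlying resource.
 *
 *	// undo early, running foo_release() now instead of at detach time:
 *	devres_release(dev, foo_release, foo_match, ctx);
 *
 *	// hardware already stopped by hand, just drop the bookkeeping:
 *	foo_hw_stop(ctx);
 *	devres_destroy(dev, foo_release, foo_match, ctx);
 */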

static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	struct devres_node *node, *n;
	int cnt = 0, nr_groups = 0;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them.  A group gets
	 * color value of two iff the group is wholly contained in
	 * [current node, end). That is, for a closed group, both opening
	 * and closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update current node or end. The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

static void release_nodes(struct device *dev, struct list_head *todo)
{
	struct devres *dr, *tmp;

	/* Release.  Note that both devres and devres_group are
	 * handled as devres in the following loop.  This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev.  This function is
 * called on driver detach.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;
	LIST_HEAD(todo);
	int cnt;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;

	/* Nothing to release if list is empty */
	if (list_empty(&dev->devres_head))
		return 0;

	spin_lock_irqsave(&dev->devres_lock, flags);
	cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	release_nodes(dev, &todo);
	return cnt;
}

/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id.  For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended.  If @id is NULL, address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void *devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	grp->id = grp;
	if (id)
		grp->id = id;
	grp->color = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/* Find devres group with ID @id.  If @id is NULL, look for the latest. */
static struct devres_group *find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}

/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id.  If @id is NULL, the latest open
 * group is selected.
 */
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);

/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id.  If @id is NULL, the latest
 * open group is selected.  Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id.  If @id is
 * NULL, the latest open group is selected.  The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	LIST_HEAD(todo);
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		cnt = remove_nodes(dev, first, end, &todo);
		spin_unlock_irqrestore(&dev->devres_lock, flags);

		release_nodes(dev, &todo);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
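
/*
 * Usage sketch for devres groups (illustrative, hypothetical helpers): a
 * driver can bracket a set of managed allocations so they can be released
 * together, before the device as a whole goes away.
 *
 *	if (!devres_open_group(dev, NULL, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	err = foo_setup_optional_feature(dev);	// does devm_* allocations
 *	if (err) {
 *		devres_release_group(dev, NULL);	// undo just this group
 *		return err;
 *	}
 *
 *	devres_close_group(dev, NULL);
 */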

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static int devm_action_match(struct device *dev, void *res, void *p)
{
	struct action_devres *devres = res;
	struct action_devres *target = p;

	return devres->action == target->action &&
	       devres->data == target->data;
}

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * __devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 * @name: Name of the resource (for debugging purposes)
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name)
{
	struct action_devres *devres;

	devres = __devres_alloc_node(devm_action_release, sizeof(struct action_devres),
				     GFP_KERNEL, NUMA_NO_NODE, name);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(__devm_add_action);
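
/*
 * Usage sketch (illustrative): drivers normally reach this through the
 * devm_add_action() / devm_add_action_or_reset() wrappers, which supply the
 * action's name for debugging.  The regulator handle below is hypothetical.
 *
 *	static void foo_disable_regulator(void *data)
 *	{
 *		regulator_disable(data);
 *	}
 *
 *	err = regulator_enable(reg);
 *	if (err)
 *		return err;
 *	err = devm_add_action_or_reset(dev, foo_disable_regulator, reg);
 *	if (err)
 *		return err;	// the action already ran on failure
 */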

/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);

/**
 * devm_release_action() - release previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Releases and removes instance of @action previously added by
 * devm_add_action().  Both action and data should match one of the
 * existing entries.
 */
void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_release_action);

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc.  Memory allocated with this function is
 * automatically freed on driver detach.  Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	if (unlikely(!size))
		return ZERO_SIZE_PTR;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons
	 * The initial implementation did not support kmalloc, only kzalloc
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
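
/*
 * Usage sketch (illustrative, hypothetical "foo" driver): the common pattern
 * is devm_kzalloc() in probe, with no matching free in remove.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *		platform_set_drvdata(pdev, priv);
 *		return 0;
 *	}
 */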

/**
 * devm_krealloc - Resource-managed krealloc()
 * @dev: Device to re-allocate memory for
 * @ptr: Pointer to the memory chunk to re-allocate
 * @new_size: New allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed krealloc(). Resizes the memory chunk allocated with devm_kmalloc().
 * Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR,
 * it's the equivalent of devm_kmalloc(). If new_size is zero, it frees the
 * previously allocated memory and returns ZERO_SIZE_PTR. This function doesn't
 * change the order in which the release callback for the re-alloc'ed devres
 * will be called (except when falling back to devm_kmalloc() or when freeing
 * resources when new_size is zero). The contents of the memory are preserved
 * up to the lesser of new and old sizes.
 */
void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
{
	size_t total_new_size, total_old_size;
	struct devres *old_dr, *new_dr;
	unsigned long flags;

	if (unlikely(!new_size)) {
		devm_kfree(dev, ptr);
		return ZERO_SIZE_PTR;
	}

	if (unlikely(ZERO_OR_NULL_PTR(ptr)))
		return devm_kmalloc(dev, new_size, gfp);

	if (WARN_ON(is_kernel_rodata((unsigned long)ptr)))
		/*
		 * We cannot reliably realloc a const string returned by
		 * devm_kstrdup_const().
		 */
		return NULL;

	if (!check_dr_size(new_size, &total_new_size))
		return NULL;

	total_old_size = ksize(container_of(ptr, struct devres, data));
	if (total_old_size == 0) {
		WARN(1, "Pointer doesn't point to dynamically allocated memory.");
		return NULL;
	}

	/*
	 * If the new size is smaller than or equal to the actual number of
	 * bytes allocated previously, just return the same pointer.
	 */
	if (total_new_size <= total_old_size)
		return ptr;

	/*
	 * Otherwise: allocate a new, larger chunk.  We need to allocate before
	 * taking the lock as the caller most probably uses GFP_KERNEL.
	 * alloc_dr() calls check_dr_size() to reserve the extra memory for
	 * struct devres automatically, so the caller's @new_size is passed to
	 * it directly, just as devm_kmalloc() does.
	 */
	new_dr = alloc_dr(devm_kmalloc_release,
			  new_size, gfp, dev_to_node(dev));
	if (!new_dr)
		return NULL;

	/*
	 * The spinlock protects the linked list against concurrent
	 * modifications but not the resource itself.
	 */
	spin_lock_irqsave(&dev->devres_lock, flags);

	old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
	if (!old_dr) {
		spin_unlock_irqrestore(&dev->devres_lock, flags);
		kfree(new_dr);
		WARN(1, "Memory chunk not managed or managed by a different device.");
		return NULL;
	}

	replace_dr(dev, &old_dr->node, &new_dr->node);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/*
	 * We can copy the memory contents after releasing the lock as we're
	 * no longer modifying the list links.
	 */
	memcpy(new_dr->data, old_dr->data,
	       total_old_size - offsetof(struct devres, data));
	/*
	 * Same for releasing the old devres - it's now been removed from the
	 * list. This is also the reason why we must not use devm_kfree() - the
	 * links are no longer valid.
	 */
	kfree(old_dr);

	return new_dr->data;
}
EXPORT_SYMBOL_GPL(devm_krealloc);
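
/*
 * Usage sketch (illustrative, hypothetical names): growing a managed buffer.
 * As with krealloc(), the old pointer must not be used once a new one is
 * returned, and the old allocation stays valid if the call fails.
 *
 *	new_buf = devm_krealloc(dev, buf, new_len, GFP_KERNEL);
 *	if (!new_buf)
 *		return -ENOMEM;	// "buf" is still allocated and managed
 *	buf = new_buf;
 */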

/**
 * devm_kstrdup - Allocate resource managed space and
 *                copy an existing string into that.
 * @dev: Device to allocate memory for
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = devm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(devm_kstrdup);

/**
 * devm_kstrdup_const - resource managed conditional string duplication
 * @dev: device for which to duplicate the string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Strings allocated by devm_kstrdup_const will be automatically freed when
 * the associated device is detached.
 *
 * RETURNS:
 * Source string if it is in .rodata section otherwise it falls back to
 * devm_kstrdup.
 */
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return devm_kstrdup(dev, s, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup_const);
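
/*
 * Usage sketch (illustrative, hypothetical names): devm_kstrdup_const() lets
 * callers hand over either a string literal or a runtime-built name without
 * caring which.
 *
 *	chip->label = devm_kstrdup_const(dev, pdata->label ?: "foo-default",
 *					 GFP_KERNEL);
 *	if (!chip->label)
 *		return -ENOMEM;
 *
 * The eventual devm_kfree() (or automatic release) is a no-op for the
 * .rodata case, so both paths clean up correctly.
 */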

/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *		     into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len+1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len+1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string
 *		    into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);
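
/*
 * Usage sketch (illustrative, hypothetical names): building a managed,
 * formatted name string.
 *
 *	irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq%d",
 *				  dev_name(dev), index);
 *	if (!irq_name)
 *		return -ENOMEM;
 */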

/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().
 */
void devm_kfree(struct device *dev, const void *p)
{
	int rc;

	/*
	 * Special cases: pointer to a string in .rodata returned by
	 * devm_kstrdup_const() or NULL/ZERO ptr.
	 */
	if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p)))
		return;

	rc = devres_destroy(dev, devm_kmalloc_release,
			    devm_kmalloc_match, (void *)p);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);

/**
 * devm_kmemdup - Resource-managed kmemdup
 * @dev: Device this memory belongs to
 * @src: Memory region to duplicate
 * @len: Memory region length
 * @gfp: GFP mask to use
 *
 * Duplicate a region of memory using resource-managed kmalloc.
 */
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = devm_kmalloc(dev, len, gfp);
	if (p)
		memcpy(p, src, len);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);

struct pages_devres {
	unsigned long addr;
	unsigned int order;
};

static int devm_pages_match(struct device *dev, void *res, void *p)
{
	struct pages_devres *devres = res;
	struct pages_devres *target = p;

	return devres->addr == target->addr;
}

static void devm_pages_release(struct device *dev, void *res)
{
	struct pages_devres *devres = res;

	free_pages(devres->addr, devres->order);
}

/**
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed get_free_pages.  Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
 */
unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order)
{
	struct pages_devres *devres;
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);

	if (unlikely(!addr))
		return 0;

	devres = devres_alloc(devm_pages_release,
			      sizeof(struct pages_devres), GFP_KERNEL);
	if (unlikely(!devres)) {
		free_pages(addr, order);
		return 0;
	}

	devres->addr = addr;
	devres->order = order;

	devres_add(dev, devres);
	return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);
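
/*
 * Usage sketch (illustrative, hypothetical sizes): a managed page-order
 * allocation; the pages are returned automatically on detach, or early via
 * devm_free_pages().
 *
 *	addr = devm_get_free_pages(dev, GFP_KERNEL | __GFP_ZERO, 2);
 *	if (!addr)
 *		return -ENOMEM;
 *	buf = (void *)addr;	// 4 pages, freed on driver detach
 */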

/**
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device this memory belongs to
 * @addr: Memory to free
 *
 * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
 * there is no need to supply the @order.
 */
void devm_free_pages(struct device *dev, unsigned long addr)
{
	struct pages_devres devres = { .addr = addr };

	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);

static void devm_percpu_release(struct device *dev, void *pdata)
{
	void __percpu *p;

	p = *(void __percpu **)pdata;
	free_percpu(p);
}

static int devm_percpu_match(struct device *dev, void *data, void *p)
{
	struct devres *devr = container_of(data, struct devres, data);

	return *(void **)devr->data == p;
}

/**
 * __devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @size: Size of per-cpu memory to allocate
 * @align: Alignment of per-cpu memory to allocate
 *
 * Managed alloc_percpu. Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
		size_t align)
{
	void *p;
	void __percpu *pcpu;

	pcpu = __alloc_percpu(size, align);
	if (!pcpu)
		return NULL;

	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
	if (!p) {
		free_percpu(pcpu);
		return NULL;
	}

	*(void __percpu **)p = pcpu;

	devres_add(dev, p);

	return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
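
/*
 * Usage sketch (illustrative, hypothetical "foo" types): drivers normally use
 * the devm_alloc_percpu() wrapper, which derives the size and alignment from
 * the type.
 *
 *	struct foo_stats __percpu *stats;
 *
 *	stats = devm_alloc_percpu(dev, struct foo_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *	this_cpu_inc(stats->rx_packets);
 */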

/**
 * devm_free_percpu - Resource-managed free_percpu
 * @dev: Device this memory belongs to
 * @pdata: Per-cpu memory to free
 *
 * Free memory allocated with devm_alloc_percpu().
 */
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
	/*
	 * Use devres_release() to prevent memory leakage as
	 * devm_free_pages() does.
	 */
	WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match,
			       (void *)(__force unsigned long)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);