// SPDX-License-Identifier: GPL-2.0
/*
 * attribute_container.c - implementation of a simple container for classes
 *
 * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
 *
 * The basic idea here is to enable a device to be attached to an
 * arbitrary number of classes without having to allocate storage for them.
 * Instead, the contained classes select the devices they need to attach
 * to via a matching function.
 */
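
/*
 * A minimal usage sketch (all names hypothetical, not part of this file):
 * a subsystem only defines an attribute_container with a match() callback;
 * this code then allocates and manages the per-device classdev on its
 * behalf.
 *
 *	static int example_match(struct attribute_container *cont,
 *				 struct device *dev)
 *	{
 *		// attach only to devices on a (hypothetical) example bus
 *		return dev->bus == &example_bus_type;
 *	}
 *
 *	static struct attribute_container example_cont = {
 *		.class = &example_class,	// hypothetical struct class
 *		.match = example_match,
 *	};
 */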

#include <linux/attribute_container.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>

#include "base.h"
/* This is a private structure used to tie the classdev and the
 * container.  It should never be visible outside this file. */
struct internal_container {
	struct klist_node node;
	struct attribute_container *cont;
	struct device classdev;
};

static void internal_container_klist_get(struct klist_node *n)
{
	struct internal_container *ic =
		container_of(n, struct internal_container, node);
	get_device(&ic->classdev);
}

static void internal_container_klist_put(struct klist_node *n)
{
	struct internal_container *ic =
		container_of(n, struct internal_container, node);
	put_device(&ic->classdev);
}

/**
 * attribute_container_classdev_to_container - given a classdev, return the container
 *
 * @classdev: the class device created by attribute_container_add_device.
 *
 * Returns the container associated with this classdev.
 */
struct attribute_container *
attribute_container_classdev_to_container(struct device *classdev)
{
	struct internal_container *ic =
		container_of(classdev, struct internal_container, classdev);
	return ic->cont;
}
EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container);

static LIST_HEAD(attribute_container_list);

static DEFINE_MUTEX(attribute_container_mutex);

/**
 * attribute_container_register - register an attribute container
 *
 * @cont: The container to register.  This must be allocated by the
 *        caller and should also be zeroed by it.
 */
int
attribute_container_register(struct attribute_container *cont)
{
	INIT_LIST_HEAD(&cont->node);
	klist_init(&cont->containers, internal_container_klist_get,
		   internal_container_klist_put);

	mutex_lock(&attribute_container_mutex);
	list_add_tail(&cont->node, &attribute_container_list);
	mutex_unlock(&attribute_container_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(attribute_container_register);
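
/*
 * Registration sketch (hypothetical names): a subsystem would typically
 * register its container once at init time, before any device can match,
 * and unregister it on exit.
 *
 *	static int __init example_init(void)
 *	{
 *		return attribute_container_register(&example_cont);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		attribute_container_unregister(&example_cont);
 *	}
 */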

/**
 * attribute_container_unregister - remove a container registration
 *
 * @cont: previously registered container to remove
 */
int
attribute_container_unregister(struct attribute_container *cont)
{
	int retval = -EBUSY;

	mutex_lock(&attribute_container_mutex);
	spin_lock(&cont->containers.k_lock);
	if (!list_empty(&cont->containers.k_list))
		goto out;
	retval = 0;
	list_del(&cont->node);
 out:
	spin_unlock(&cont->containers.k_lock);
	mutex_unlock(&attribute_container_mutex);
	return retval;
}
EXPORT_SYMBOL_GPL(attribute_container_unregister);

/* private function used as class release */
static void attribute_container_release(struct device *classdev)
{
	struct internal_container *ic
		= container_of(classdev, struct internal_container, classdev);
	struct device *dev = classdev->parent;

	kfree(ic);
	put_device(dev);
}

/**
 * attribute_container_add_device - see if any container is interested in dev
 *
 * @dev: device to add attributes to
 * @fn:	 function to trigger addition of class device.
 *
 * This function allocates storage for the class device(s) to be
 * attached to dev (one for each matching attribute_container).  If no
 * fn is provided, the code will simply register the class device via
 * device_add.  If a function is provided, it is expected to add
 * the class device at the appropriate time.  One of the things that
 * might be necessary is to allocate and initialise the classdev and
 * then add it at a later time.  To do this, call this routine for
 * allocation and initialisation and then use
 * attribute_container_device_trigger() to call device_add() on
 * it.  Note: after this, the class device contains a reference to dev
 * which is not relinquished until the release of the classdev.
 */
void
attribute_container_add_device(struct device *dev,
			       int (*fn)(struct attribute_container *,
					 struct device *,
					 struct device *))
{
	struct attribute_container *cont;

	mutex_lock(&attribute_container_mutex);
	list_for_each_entry(cont, &attribute_container_list, node) {
		struct internal_container *ic;

		if (attribute_container_no_classdevs(cont))
			continue;

		if (!cont->match(cont, dev))
			continue;

		ic = kzalloc(sizeof(*ic), GFP_KERNEL);
		if (!ic) {
			dev_err(dev, "failed to allocate class container\n");
			continue;
		}

		ic->cont = cont;
		device_initialize(&ic->classdev);
		ic->classdev.parent = get_device(dev);
		ic->classdev.class = cont->class;
		cont->class->dev_release = attribute_container_release;
		dev_set_name(&ic->classdev, "%s", dev_name(dev));
		if (fn)
			fn(cont, dev, &ic->classdev);
		else
			attribute_container_add_class_device(&ic->classdev);
		klist_add_tail(&ic->node, &cont->containers);
	}
	mutex_unlock(&attribute_container_mutex);
}
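
/*
 * A hedged sketch of the two-phase add described above (all names
 * hypothetical): attribute_container_add_device() is called with a fn that
 * defers device_add(), and attribute_container_device_trigger() is used
 * later to register each classdev.
 *
 *	static int example_setup_classdev(struct attribute_container *cont,
 *					  struct device *dev,
 *					  struct device *classdev)
 *	{
 *		// classdev is already initialised; registration is deferred
 *		return 0;
 *	}
 *
 *	static int example_add_classdev(struct attribute_container *cont,
 *					struct device *dev,
 *					struct device *classdev)
 *	{
 *		return attribute_container_add_class_device(classdev);
 *	}
 *
 *	attribute_container_add_device(dev, example_setup_classdev);
 *	...
 *	attribute_container_device_trigger(dev, example_add_classdev);
 */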

/* FIXME: can't break out of this unless klist_iter_exit is also
 * called before doing the break
 */
#define klist_for_each_entry(pos, head, member, iter) \
	for (klist_iter_init(head, iter); (pos = ({ \
		struct klist_node *n = klist_next(iter); \
		n ? container_of(n, typeof(*pos), member) : \
			({ klist_iter_exit(iter); NULL; }); \
	})) != NULL;)
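
/*
 * Illustration of the constraint noted in the FIXME above: any early break
 * out of klist_for_each_entry() must call klist_iter_exit() first, otherwise
 * the iterator keeps holding a reference on the current node.
 *
 *	klist_for_each_entry(ic, &cont->containers, node, &iter) {
 *		if (ic->classdev.parent == dev) {
 *			klist_iter_exit(&iter);
 *			break;
 *		}
 *	}
 */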

/**
 * attribute_container_remove_device - make device eligible for removal.
 *
 * @dev:  The generic device
 * @fn:	  A function to call to remove the device
 *
 * This routine triggers device removal.  If fn is NULL, then it is
 * simply done via device_unregister (note that if something
 * still has a reference to the classdev, then the memory occupied
 * will not be freed until the classdev is released).  If you want a
 * two phase release (remove from visibility and then delete the
 * device), use this routine with a fn that calls device_del() and
 * then use attribute_container_device_trigger() to do the final put
 * on the classdev.
 */
void
attribute_container_remove_device(struct device *dev,
				  void (*fn)(struct attribute_container *,
					     struct device *,
					     struct device *))
{
	struct attribute_container *cont;

	mutex_lock(&attribute_container_mutex);
	list_for_each_entry(cont, &attribute_container_list, node) {
		struct internal_container *ic;
		struct klist_iter iter;

		if (attribute_container_no_classdevs(cont))
			continue;

		if (!cont->match(cont, dev))
			continue;

		klist_for_each_entry(ic, &cont->containers, node, &iter) {
			if (dev != ic->classdev.parent)
				continue;
			klist_del(&ic->node);
			if (fn)
				fn(cont, dev, &ic->classdev);
			else {
				attribute_container_remove_attrs(&ic->classdev);
				device_unregister(&ic->classdev);
			}
		}
	}
	mutex_unlock(&attribute_container_mutex);
}
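
/*
 * Simplest removal sketch: with fn == NULL, each matching classdev that was
 * created by attribute_container_add_device() is torn down via
 * device_unregister(), e.g. from the owning driver's remove path
 * (hypothetical call site).
 *
 *	attribute_container_remove_device(dev, NULL);
 */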

static int
do_attribute_container_device_trigger_safe(struct device *dev,
					   struct attribute_container *cont,
					   int (*fn)(struct attribute_container *,
						     struct device *, struct device *),
					   int (*undo)(struct attribute_container *,
						       struct device *, struct device *))
{
	int ret;
	struct internal_container *ic, *failed;
	struct klist_iter iter;

	if (attribute_container_no_classdevs(cont))
		return fn(cont, dev, NULL);

	klist_for_each_entry(ic, &cont->containers, node, &iter) {
		if (dev == ic->classdev.parent) {
			ret = fn(cont, dev, &ic->classdev);
			if (ret) {
				failed = ic;
				klist_iter_exit(&iter);
				goto fail;
			}
		}
	}
	return 0;

fail:
	if (!undo)
		return ret;

	/* Attempt to undo the work partially done. */
	klist_for_each_entry(ic, &cont->containers, node, &iter) {
		if (ic == failed) {
			klist_iter_exit(&iter);
			break;
		}
		if (dev == ic->classdev.parent)
			undo(cont, dev, &ic->classdev);
	}
	return ret;
}

/**
 * attribute_container_device_trigger_safe - execute a trigger for each
 * matching classdev or fail all of them.
 *
 * @dev:  The generic device to run the trigger for
 * @fn:   the function to execute for each classdev.
 * @undo: A function to undo the work previously done in case of error
 *
 * This function is a safe version of
 * attribute_container_device_trigger. It stops on the first error and
 * undoes the partial work already done on the previous classdevs.  It
 * is guaranteed that either they all succeed or none of them do.
 */
int
attribute_container_device_trigger_safe(struct device *dev,
					int (*fn)(struct attribute_container *,
						  struct device *,
						  struct device *),
					int (*undo)(struct attribute_container *,
						    struct device *,
						    struct device *))
{
	struct attribute_container *cont, *failed = NULL;
	int ret = 0;

	mutex_lock(&attribute_container_mutex);

	list_for_each_entry(cont, &attribute_container_list, node) {

		if (!cont->match(cont, dev))
			continue;

		ret = do_attribute_container_device_trigger_safe(dev, cont,
								 fn, undo);
		if (ret) {
			failed = cont;
			break;
		}
	}

	if (ret && !WARN_ON(!undo)) {
		list_for_each_entry(cont, &attribute_container_list, node) {

			if (failed == cont)
				break;

			if (!cont->match(cont, dev))
				continue;

			do_attribute_container_device_trigger_safe(dev, cont,
								   undo, NULL);
		}
	}

	mutex_unlock(&attribute_container_mutex);
	return ret;
}
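
/*
 * Hedged usage sketch (hypothetical callbacks): configure every matching
 * classdev, rolling back the ones already configured if a later one fails.
 *
 *	static int example_enable(struct attribute_container *cont,
 *				  struct device *dev,
 *				  struct device *classdev)
 *	{
 *		return example_do_enable(classdev);	// hypothetical helper
 *	}
 *
 *	static int example_disable(struct attribute_container *cont,
 *				   struct device *dev,
 *				   struct device *classdev)
 *	{
 *		example_do_disable(classdev);		// hypothetical helper
 *		return 0;
 *	}
 *
 *	err = attribute_container_device_trigger_safe(dev, example_enable,
 *						      example_disable);
 */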

/**
 * attribute_container_device_trigger - execute a trigger for each matching classdev
 *
 * @dev:  The generic device to run the trigger for
 * @fn:   the function to execute for each classdev.
 *
 * This function is for executing a trigger when you need to know both
 * the container and the classdev.
 */
void
attribute_container_device_trigger(struct device *dev,
				   int (*fn)(struct attribute_container *,
					     struct device *,
					     struct device *))
{
	struct attribute_container *cont;

	mutex_lock(&attribute_container_mutex);
	list_for_each_entry(cont, &attribute_container_list, node) {
		struct internal_container *ic;
		struct klist_iter iter;

		if (!cont->match(cont, dev))
			continue;

		if (attribute_container_no_classdevs(cont)) {
			fn(cont, dev, NULL);
			continue;
		}

		klist_for_each_entry(ic, &cont->containers, node, &iter) {
			if (dev == ic->classdev.parent)
				fn(cont, dev, &ic->classdev);
		}
	}
	mutex_unlock(&attribute_container_mutex);
}

/**
 * attribute_container_add_attrs - add attributes
 *
 * @classdev: The class device
 *
 * This simply creates all the class device sysfs files from the
 * attributes listed in the container.
 */
int
attribute_container_add_attrs(struct device *classdev)
{
	struct attribute_container *cont =
		attribute_container_classdev_to_container(classdev);
	struct device_attribute **attrs = cont->attrs;
	int i, error;

	BUG_ON(attrs && cont->grp);

	if (!attrs && !cont->grp)
		return 0;

	if (cont->grp)
		return sysfs_create_group(&classdev->kobj, cont->grp);

	for (i = 0; attrs[i]; i++) {
		sysfs_attr_init(&attrs[i]->attr);
		error = device_create_file(classdev, attrs[i]);
		if (error)
			return error;
	}

	return 0;
}
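
/*
 * Sketch of how a container's attributes might be declared (hypothetical
 * names, extending the earlier example_cont sketch): either a
 * NULL-terminated attrs array or a single attribute group, never both
 * (see the BUG_ON above).
 *
 *	static DEVICE_ATTR_RO(example_value);	// hypothetical show routine
 *
 *	static struct device_attribute *example_attrs[] = {
 *		&dev_attr_example_value,
 *		NULL,
 *	};
 *
 *	static struct attribute_container example_cont = {
 *		.class = &example_class,
 *		.match = example_match,
 *		.attrs = example_attrs,
 *	};
 */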

/**
 * attribute_container_add_class_device - same function as device_add
 *
 * @classdev:	the class device to add
 *
 * This performs essentially the same function as device_add except for
 * attribute containers, namely adding the classdev to the system and
 * then creating the attribute files.
 */
int
attribute_container_add_class_device(struct device *classdev)
{
	int error = device_add(classdev);

	if (error)
		return error;
	return attribute_container_add_attrs(classdev);
}

/**
 * attribute_container_remove_attrs - remove any attribute files
 *
 * @classdev: The class device to remove the files from
 */
void
attribute_container_remove_attrs(struct device *classdev)
{
	struct attribute_container *cont =
		attribute_container_classdev_to_container(classdev);
	struct device_attribute **attrs = cont->attrs;
	int i;

	if (!attrs && !cont->grp)
		return;

	if (cont->grp) {
		sysfs_remove_group(&classdev->kobj, cont->grp);
		return;
	}

	for (i = 0; attrs[i]; i++)
		device_remove_file(classdev, attrs[i]);
}

/**
 * attribute_container_class_device_del - equivalent of class_device_del
 *
 * @classdev: the class device
 *
 * This function simply removes all the attribute files and then calls
 * device_del.
 */
void
attribute_container_class_device_del(struct device *classdev)
{
	attribute_container_remove_attrs(classdev);
	device_del(classdev);
}

/**
 * attribute_container_find_class_device - find the corresponding class_device
 *
 * @cont:	the container
 * @dev:	the generic device
 *
 * Looks up the device in the container's list of class devices and returns
 * the corresponding class_device.
 */
struct device *
attribute_container_find_class_device(struct attribute_container *cont,
				      struct device *dev)
{
	struct device *cdev = NULL;
	struct internal_container *ic;
	struct klist_iter iter;

	klist_for_each_entry(ic, &cont->containers, node, &iter) {
		if (ic->classdev.parent == dev) {
			cdev = &ic->classdev;
			/* FIXME: must exit iterator then break */
			klist_iter_exit(&iter);
			break;
		}
	}

	return cdev;
}
EXPORT_SYMBOL_GPL(attribute_container_find_class_device);
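
/*
 * Usage sketch (hypothetical call site): recover the classdev that this code
 * created for a given device, for instance to update one of its attributes.
 *
 *	struct device *classdev =
 *		attribute_container_find_class_device(&example_cont, dev);
 *	if (classdev)
 *		dev_info(classdev, "found matching classdev\n");
 */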