// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core, trigger handling functions
 *
 * Copyright (c) 2008 Jonathan Cameron
 */

#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/trigger_consumer.h>

/* RFC - Question of approach
 * Make the common case (single sensor, single trigger) simple by
 * starting trigger capture when the first sensor is added.
 *
 * Complex simultaneous start requires use of the 'hold' functionality
 * of the trigger. (not implemented)
 *
 * Any other suggestions?
 */

static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);

/**
 * name_show() - retrieve useful identifying name
 * @dev:	device associated with the iio_trigger
 * @attr:	pointer to the device_attribute structure that is
 *		being processed
 * @buf:	buffer to print the name into
 *
 * Return: a negative number on failure or the number of written
 *	   characters on success.
 */
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct iio_trigger *trig = to_iio_trigger(dev);

	return sysfs_emit(buf, "%s\n", trig->name);
}

static DEVICE_ATTR_RO(name);

static struct attribute *iio_trig_dev_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);

static struct iio_trigger *__iio_trigger_find_by_name(const char *name);

int iio_trigger_register(struct iio_trigger *trig_info)
{
	int ret;

	trig_info->id = ida_alloc(&iio_trigger_ida, GFP_KERNEL);
	if (trig_info->id < 0)
		return trig_info->id;

	/* Set the name used for the sysfs directory etc */
	dev_set_name(&trig_info->dev, "trigger%d", trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret)
		goto error_unregister_id;

	/* Add to list of available triggers held by the IIO core */
	scoped_guard(mutex, &iio_trigger_list_lock) {
		if (__iio_trigger_find_by_name(trig_info->name)) {
			pr_err("Duplicate trigger name '%s'\n", trig_info->name);
			ret = -EEXIST;
			goto error_device_del;
		}
		list_add_tail(&trig_info->list, &iio_trigger_list);
	}

	return 0;

error_device_del:
	device_del(&trig_info->dev);
error_unregister_id:
	ida_free(&iio_trigger_ida, trig_info->id);
	return ret;
}
EXPORT_SYMBOL(iio_trigger_register);

void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	scoped_guard(mutex, &iio_trigger_list_lock)
		list_del(&trig_info->list);

	ida_free(&iio_trigger_ida, trig_info->id);
	/* Possible issue in here */
	device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);

int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
{
	struct iio_dev_opaque *iio_dev_opaque;

	if (!indio_dev || !trig)
		return -EINVAL;

	iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	guard(mutex)(&iio_dev_opaque->mlock);
	WARN_ON(iio_dev_opaque->trig_readonly);

	indio_dev->trig = iio_trigger_get(trig);
	iio_dev_opaque->trig_readonly = true;

	return 0;
}
EXPORT_SYMBOL(iio_trigger_set_immutable);
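/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a device that must always use its own trigger can pin it at probe
 * time, after registering the trigger, so userspace cannot change
 * current_trigger.
 *
 *	ret = devm_iio_trigger_register(dev, trig);
 *	if (ret)
 *		return ret;
 *
 *	ret = iio_trigger_set_immutable(indio_dev, trig);
 *	if (ret)
 *		return ret;
 */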

/* Search for trigger by name, assuming iio_trigger_list_lock held */
static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
{
	struct iio_trigger *iter;

	list_for_each_entry(iter, &iio_trigger_list, list)
		if (!strcmp(iter->name, name))
			return iter;

	return NULL;
}

static struct iio_trigger *iio_trigger_acquire_by_name(const char *name)
{
	struct iio_trigger *iter;

	guard(mutex)(&iio_trigger_list_lock);
	list_for_each_entry(iter, &iio_trigger_list, list)
		if (sysfs_streq(iter->name, name))
			return iio_trigger_get(iter);

	return NULL;
}

static void iio_reenable_work_fn(struct work_struct *work)
{
	struct iio_trigger *trig = container_of(work, struct iio_trigger,
						reenable_work);

	/*
	 * This 'might' occur after the trigger state is set to disabled -
	 * in that case the driver should skip reenabling.
	 */
	trig->ops->reenable(trig);
}

/*
 * In general, reenable callbacks may need to sleep and this path is
 * not performance sensitive, so just queue up a work item
 * to reenable the trigger for us.
 *
 * Races that can cause this:
 * 1) A handler occurs entirely in interrupt context, so the final
 *    decrement of the use counter happens in that interrupt.
 * 2) The trigger has been removed, but one last interrupt gets through.
 *
 * For (1) we must call reenable, but not in atomic context.
 * For (2) it should be safe to call reenable, provided drivers never
 * blindly reenable after the trigger state is off.
 */
static void iio_trigger_notify_done_atomic(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->reenable)
		schedule_work(&trig->reenable_work);
}

/**
 * iio_trigger_poll() - Call the IRQ trigger handler of the consumers
 * @trig: trigger which occurred
 *
 * This function should only be called from a hard IRQ context.
 */
void iio_trigger_poll(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				generic_handle_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done_atomic(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll);

irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
	iio_trigger_poll(private);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
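/*
 * Illustrative sketch (hypothetical driver code): a trigger provider
 * with a data-ready interrupt line can use the generic handler above
 * directly when requesting its IRQ, passing the trigger as the dev_id
 * cookie.
 *
 *	ret = devm_request_irq(dev, irq, iio_trigger_generic_data_rdy_poll,
 *			       IRQF_TRIGGER_RISING, trig->name, trig);
 */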

/**
 * iio_trigger_poll_nested() - Call the threaded trigger handler of the
 * consumers
 * @trig: trigger which occurred
 *
 * This function should only be called from a kernel thread context.
 */
void iio_trigger_poll_nested(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				handle_nested_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll_nested);
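/*
 * Illustrative sketch (hypothetical driver code): a provider whose
 * interrupt must be acknowledged over a slow bus (e.g. I2C) calls the
 * nested variant from its threaded interrupt handler instead.
 *
 *	static irqreturn_t my_irq_thread(int irq, void *private)
 *	{
 *		struct iio_trigger *trig = private;
 *
 *		my_ack_interrupt(trig);
 *		iio_trigger_poll_nested(trig);
 *		return IRQ_HANDLED;
 *	}
 */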

void iio_trigger_notify_done(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->reenable)
		trig->ops->reenable(trig);
}
EXPORT_SYMBOL(iio_trigger_notify_done);
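/*
 * Illustrative sketch (hypothetical consumer code): the threaded half
 * of a consumer's poll function calls iio_trigger_notify_done() once
 * its data has been pushed, so the trigger can be reenabled.
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *
 *		my_read_and_push(indio_dev, pf->timestamp);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */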

/* Trigger Consumer related functions */
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
	int ret;

	scoped_guard(mutex, &trig->pool_lock) {
		ret = bitmap_find_free_region(trig->pool,
					      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					      ilog2(1));
		if (ret < 0)
			return ret;
	}

	return ret + trig->subirq_base;
}

static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
	guard(mutex)(&trig->pool_lock);
	clear_bit(irq - trig->subirq_base, trig->pool);
}

/* Complexity in here.  With certain triggers (datardy) an acknowledgement
 * may be needed if the pollfuncs do not include the data read for the
 * triggering device.
 * This is not currently handled.  An alternative of not enabling the
 * trigger unless the relevant function is included may be the best option.
 */
/* Worth protecting against double additions? */
int iio_trigger_attach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
	bool notinuse =
		bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	int ret = 0;

	/* Prevent the module from being removed whilst attached to a trigger */
	__module_get(iio_dev_opaque->driver_module);

	/* Get irq number */
	pf->irq = iio_trigger_get_irq(trig);
	if (pf->irq < 0) {
		pr_err("Could not find an available irq for trigger %s, CONFIG_IIO_CONSUMERS_PER_TRIGGER=%d limit might be exceeded\n",
			trig->name, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
		/* Propagate the failure rather than returning 0 */
		ret = pf->irq;
		goto out_put_module;
	}

	/* Request irq */
	ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
				   pf->type, pf->name,
				   pf);
	if (ret < 0)
		goto out_put_irq;

	/* Enable trigger in driver */
	if (trig->ops && trig->ops->set_trigger_state && notinuse) {
		ret = trig->ops->set_trigger_state(trig, true);
		if (ret)
			goto out_free_irq;
	}

	/*
	 * Check if we just registered to our own trigger: we determine that
	 * this is the case if the IIO device and the trigger device share the
	 * same parent device.
	 */
	if (!iio_validate_own_trigger(pf->indio_dev, trig))
		trig->attached_own_device = true;

	return ret;

out_free_irq:
	free_irq(pf->irq, pf);
out_put_irq:
	iio_trigger_put_irq(trig, pf->irq);
out_put_module:
	module_put(iio_dev_opaque->driver_module);
	return ret;
}

int iio_trigger_detach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
	bool no_other_users =
		bitmap_weight(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1;
	int ret = 0;

	if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
		ret = trig->ops->set_trigger_state(trig, false);
		if (ret)
			return ret;
	}
	if (pf->indio_dev->dev.parent == trig->dev.parent)
		trig->attached_own_device = false;
	iio_trigger_put_irq(trig, pf->irq);
	free_irq(pf->irq, pf);
	module_put(iio_dev_opaque->driver_module);
	pf->irq = 0;

	return ret;
}

irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	pf->timestamp = iio_get_time_ns(pf->indio_dev);
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);
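/*
 * Illustrative sketch (hypothetical driver code): drivers rarely call
 * iio_alloc_pollfunc() directly; the usual pattern is to pass
 * iio_pollfunc_store_time as the top half when setting up a triggered
 * buffer, so a timestamp close to the event is grabbed in hard IRQ
 * context before the threaded handler runs.
 *
 *	ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
 *					      iio_pollfunc_store_time,
 *					      my_trigger_handler, NULL);
 */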

struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
		    irqreturn_t (*thread)(int irq, void *p),
		    int type,
		    struct iio_dev *indio_dev,
		    const char *fmt,
		    ...)
{
	va_list vargs;
	struct iio_poll_func *pf;

	pf = kmalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf)
		return NULL;
	va_start(vargs, fmt);
	pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
	if (pf->name == NULL) {
		kfree(pf);
		return NULL;
	}
	pf->h = h;
	pf->thread = thread;
	pf->type = type;
	pf->indio_dev = indio_dev;

	return pf;
}
EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);

void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
	kfree(pf->name);
	kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);

/**
 * current_trigger_show() - trigger consumer sysfs query current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	pointer to the device_attribute structure that
 *		is being processed
 * @buf:	buffer where the current trigger name will be printed into
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used by the device to be queried.
 *
 * Return: a negative number on failure, the number of characters written
 *	   on success or 0 if no trigger is available
 */
static ssize_t current_trigger_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	if (indio_dev->trig)
		return sysfs_emit(buf, "%s\n", indio_dev->trig->name);
	return 0;
}

/**
 * current_trigger_store() - trigger consumer sysfs set current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	device attribute that is being processed
 * @buf:	string buffer that holds the name of the trigger
 * @len:	length of the trigger name held by buf
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name.
 *
 * Return: negative error code on failure or length of the buffer
 *	   on success
 */
static ssize_t current_trigger_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_trigger *oldtrig = indio_dev->trig;
	struct iio_trigger *trig;
	int ret;

	scoped_guard(mutex, &iio_dev_opaque->mlock) {
		if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED)
			return -EBUSY;
		if (iio_dev_opaque->trig_readonly)
			return -EPERM;
	}

	trig = iio_trigger_acquire_by_name(buf);
	if (oldtrig == trig) {
		ret = len;
		goto out_trigger_put;
	}

	if (trig && indio_dev->info->validate_trigger) {
		ret = indio_dev->info->validate_trigger(indio_dev, trig);
		if (ret)
			goto out_trigger_put;
	}

	if (trig && trig->ops && trig->ops->validate_device) {
		ret = trig->ops->validate_device(trig, indio_dev);
		if (ret)
			goto out_trigger_put;
	}

	indio_dev->trig = trig;

	if (oldtrig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_detach_poll_func(oldtrig,
						     indio_dev->pollfunc_event);
		iio_trigger_put(oldtrig);
	}
	if (indio_dev->trig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_attach_poll_func(indio_dev->trig,
						     indio_dev->pollfunc_event);
	}

	return len;

out_trigger_put:
	if (trig)
		iio_trigger_put(trig);
	return ret;
}

static DEVICE_ATTR_RW(current_trigger);

static struct attribute *iio_trigger_consumer_attrs[] = {
	&dev_attr_current_trigger.attr,
	NULL,
};

static const struct attribute_group iio_trigger_consumer_attr_group = {
	.name = "trigger",
	.attrs = iio_trigger_consumer_attrs,
};

static void iio_trig_release(struct device *device)
{
	struct iio_trigger *trig = to_iio_trigger(device);
	int i;

	if (trig->subirq_base) {
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOAUTOEN,
					  IRQ_NOREQUEST | IRQ_NOPROBE);
			irq_set_chip(trig->subirq_base + i, NULL);
			irq_set_handler(trig->subirq_base + i, NULL);
		}

		irq_free_descs(trig->subirq_base,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	}
	kfree(trig->name);
	kfree(trig);
}

static const struct device_type iio_trig_type = {
	.release = iio_trig_release,
	.groups = iio_trig_dev_groups,
};

static void iio_trig_subirqmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = false;
}

static void iio_trig_subirqunmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}

static __printf(3, 0)
struct iio_trigger *viio_trigger_alloc(struct device *parent,
				       struct module *this_mod,
				       const char *fmt,
				       va_list vargs)
{
	struct iio_trigger *trig;
	int i;

	trig = kzalloc(sizeof(*trig), GFP_KERNEL);
	if (!trig)
		return NULL;

	trig->dev.parent = parent;
	trig->dev.type = &iio_trig_type;
	trig->dev.bus = &iio_bus_type;
	device_initialize(&trig->dev);
	INIT_WORK(&trig->reenable_work, iio_reenable_work_fn);

	mutex_init(&trig->pool_lock);
	trig->subirq_base = irq_alloc_descs(-1, 0,
					    CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					    0);
	if (trig->subirq_base < 0)
		goto free_trig;

	trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	if (trig->name == NULL)
		goto free_descs;

	INIT_LIST_HEAD(&trig->list);

	trig->owner = this_mod;

	trig->subirq_chip.name = trig->name;
	trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
	trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
	for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
		irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
		irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
		irq_modify_status(trig->subirq_base + i,
				  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	}

	return trig;

free_descs:
	irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
free_trig:
	kfree(trig);
	return NULL;
}

/**
 * __iio_trigger_alloc - Allocate a trigger
 * @parent:		Device to allocate iio_trigger for
 * @this_mod:		module allocating the trigger
 * @fmt:		trigger name format. If it includes format
 *			specifiers, the additional arguments following
 *			format are formatted and inserted in the resulting
 *			string replacing their respective specifiers.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *__iio_trigger_alloc(struct device *parent,
					struct module *this_mod,
					const char *fmt, ...)
{
	struct iio_trigger *trig;
	va_list vargs;

	va_start(vargs, fmt);
	trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
	va_end(vargs);

	return trig;
}
EXPORT_SYMBOL(__iio_trigger_alloc);
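/*
 * Illustrative sketch (hypothetical driver code): drivers normally use
 * the iio_trigger_alloc() wrapper macro, which supplies THIS_MODULE,
 * and pair it with iio_trigger_free() on the error and remove paths.
 *
 *	trig = iio_trigger_alloc(dev, "%s-dev%d", name,
 *				 iio_device_id(indio_dev));
 *	if (!trig)
 *		return -ENOMEM;
 */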

void iio_trigger_free(struct iio_trigger *trig)
{
	if (trig)
		put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);

static void devm_iio_trigger_release(struct device *dev, void *res)
{
	iio_trigger_free(*(struct iio_trigger **)res);
}

/**
 * __devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @parent:		Device to allocate iio_trigger for
 * @this_mod:		module allocating the trigger
 * @fmt:		trigger name format. If it includes format
 *			specifiers, the additional arguments following
 *			format are formatted and inserted in the resulting
 *			string replacing their respective specifiers.
 *
 * Managed iio_trigger_alloc(). An iio_trigger allocated with this
 * function is automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
					     struct module *this_mod,
					     const char *fmt, ...)
{
	struct iio_trigger **ptr, *trig;
	va_list vargs;

	ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	/* use raw alloc_dr for kmalloc caller tracing */
	va_start(vargs, fmt);
	trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
	va_end(vargs);
	if (trig) {
		*ptr = trig;
		devres_add(parent, ptr);
	} else {
		devres_free(ptr);
	}

	return trig;
}
EXPORT_SYMBOL_GPL(__devm_iio_trigger_alloc);

static void devm_iio_trigger_unreg(void *trigger_info)
{
	iio_trigger_unregister(trigger_info);
}

/**
 * devm_iio_trigger_register - Resource-managed iio_trigger_register()
 * @dev:	device this trigger was allocated for
 * @trig_info:	trigger to register
 *
 * Managed iio_trigger_register().  The IIO trigger registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_trigger_register() internally. Refer to that function for more
 * information.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_trigger_register(struct device *dev,
			      struct iio_trigger *trig_info)
{
	int ret;

	ret = iio_trigger_register(trig_info);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_iio_trigger_unreg, trig_info);
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_register);
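/*
 * Illustrative sketch (hypothetical driver code): a typical fully
 * managed provider setup, with my_trigger_ops and my_state assumed to
 * be defined elsewhere.
 *
 *	trig = devm_iio_trigger_alloc(dev, "%s-dev%d", name,
 *				      iio_device_id(indio_dev));
 *	if (!trig)
 *		return -ENOMEM;
 *
 *	trig->ops = &my_trigger_ops;
 *	iio_trigger_set_drvdata(trig, my_state);
 *
 *	ret = devm_iio_trigger_register(dev, trig);
 *	if (ret)
 *		return ret;
 */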

bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
	return indio_dev->trig->attached_own_device;
}
EXPORT_SYMBOL(iio_trigger_using_own);

/**
 * iio_validate_own_trigger - Check if a trigger and IIO device belong to
 *  the same device
 * @idev: the IIO device to check
 * @trig: the IIO trigger to check
 *
 * This function can be used as the validate_trigger callback for triggers that
 * can only be attached to their own device.
 *
 * Return: 0 if both the trigger and the IIO device belong to the same
 * device, -EINVAL otherwise.
 */
int iio_validate_own_trigger(struct iio_dev *idev, struct iio_trigger *trig)
{
	if (idev->dev.parent != trig->dev.parent)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(iio_validate_own_trigger);
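/*
 * Illustrative sketch (hypothetical driver code): wired up through the
 * device's struct iio_info so userspace cannot select a foreign trigger
 * via current_trigger.
 *
 *	static const struct iio_info my_info = {
 *		.read_raw = my_read_raw,
 *		.validate_trigger = iio_validate_own_trigger,
 *	};
 */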

/**
 * iio_trigger_validate_own_device - Check if a trigger and IIO device belong to
 *  the same device
 * @trig: The IIO trigger to check
 * @indio_dev: the IIO device to check
 *
 * This function can be used as the validate_device callback for triggers that
 * can only be attached to their own device.
 *
 * Return: 0 if both the trigger and the IIO device belong to the same
 * device, -EINVAL otherwise.
 */
int iio_trigger_validate_own_device(struct iio_trigger *trig,
				    struct iio_dev *indio_dev)
{
	if (indio_dev->dev.parent != trig->dev.parent)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(iio_trigger_validate_own_device);
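/*
 * Illustrative sketch (hypothetical driver code): the trigger-side
 * counterpart, wired up through struct iio_trigger_ops.
 *
 *	static const struct iio_trigger_ops my_trigger_ops = {
 *		.set_trigger_state = my_set_trigger_state,
 *		.validate_device = iio_trigger_validate_own_device,
 *	};
 */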

int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	return iio_device_register_sysfs_group(indio_dev,
					       &iio_trigger_consumer_attr_group);
}

void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
	/* Clean up an associated but not attached trigger reference */
	if (indio_dev->trig)
		iio_trigger_put(indio_dev->trig);
}

int iio_device_suspend_triggering(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	guard(mutex)(&iio_dev_opaque->mlock);

	if (indio_dev->pollfunc && indio_dev->pollfunc->irq > 0)
		disable_irq(indio_dev->pollfunc->irq);

	return 0;
}
EXPORT_SYMBOL(iio_device_suspend_triggering);

int iio_device_resume_triggering(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	guard(mutex)(&iio_dev_opaque->mlock);

	if (indio_dev->pollfunc && indio_dev->pollfunc->irq > 0)
		enable_irq(indio_dev->pollfunc->irq);

	return 0;
}
EXPORT_SYMBOL(iio_device_resume_triggering);
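/*
 * Illustrative sketch (hypothetical driver code): these helpers slot
 * straight into a driver's system PM callbacks.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct iio_dev *indio_dev = dev_get_drvdata(dev);
 *
 *		return iio_device_suspend_triggering(indio_dev);
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		struct iio_dev *indio_dev = dev_get_drvdata(dev);
 *
 *		return iio_device_resume_triggering(indio_dev);
 *	}
 */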