// SPDX-License-Identifier: GPL-2.0-only
/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

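/*
 * Map the idx'th pending event of a subscription to its position in the
 * circular buffer sev->events, wrapping around at sev->elems.
 * sev->first is the position of the oldest pending event.
 */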
static unsigned int sev_pos(const struct v4l2_subscribed_event *sev, unsigned int idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}

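/*
 * Remove the oldest available event from the file handle and copy it to
 * *event, converting the 64-bit nanosecond timestamp to the UAPI
 * timespec representation. Returns -ENOENT if no event is pending.
 */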
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	struct timespec64 ts;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	ts = ns_to_timespec64(kev->ts);
	event->timestamp.tv_sec = ts.tv_sec;
	event->timestamp.tv_nsec = ts.tv_nsec;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

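/*
 * Dequeue an event, sleeping until one arrives unless nonblocking is
 * set. The vdev lock (if any) is dropped while waiting so that other
 * file operations can make progress and queue new events.
 */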
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}

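/*
 * Queue an event on a single file handle. Caller must hold
 * fh->vdev->fh_lock. If the subscription's ring buffer is full, the
 * oldest event is dropped to make room; subscriptions may install
 * replace/merge ops so that no information is lost when that happens.
 */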
static void __v4l2_event_queue_fh(struct v4l2_fh *fh,
				  const struct v4l2_event *ev, u64 ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->ts = ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}

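/*
 * Queue an event on every file handle listening on the video device.
 * The timestamp is taken once, so all file handles see the same value.
 * The fh_lock is taken with irqsave, so this is safe to call from
 * interrupt context.
 */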
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	u64 ts;

	if (vdev == NULL)
		return;

	ts = ktime_get_ns();

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, ts);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

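/* Queue an event on a single file handle only. */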
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	u64 ts = ktime_get_ns();

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, ts);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

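/*
 * Return the number of events pending on the file handle; non-zero
 * means a dequeue will not block.
 */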
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

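/*
 * Wake up every file handle waiting for events on the video device,
 * e.g. so that blocked waiters can return when the device goes away.
 */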
void v4l2_event_wake_all(struct video_device *vdev)
{
	struct v4l2_fh *fh;
	unsigned long flags;

	if (!vdev)
		return;

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		wake_up_all(&fh->wait);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_wake_all);

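/*
 * Unlink a subscription and drop any of its events still sitting on the
 * file handle's available list. Both fh->subscribe_lock and
 * fh->vdev->fh_lock must be held, as asserted below.
 */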
static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
	struct v4l2_fh *fh = sev->fh;
	unsigned int i;

	lockdep_assert_held(&fh->subscribe_lock);
	assert_spin_locked(&fh->vdev->fh_lock);

	/* Remove any pending events for this subscription */
	for (i = 0; i < sev->in_use; i++) {
		list_del(&sev->events[sev_pos(sev, i)].list);
		fh->navailable--;
	}
	list_del(&sev->list);
}

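/*
 * Subscribe a file handle to events of the given type/id. At least one
 * event slot is always allocated, V4L2_EVENT_ALL cannot be subscribed
 * to, subscribing twice to the same type/id is a no-op, and a failing
 * ops->add callback undoes the subscription. A driver's
 * vidioc_subscribe_event handler would typically forward here, e.g.
 * (illustrative only):
 *
 *	return v4l2_event_subscribe(fh, sub, 4, NULL);
 */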
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned int elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned int i;
	int ret = 0;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	sev->elems = elems;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		/* Already listening */
		kvfree(sev);
	} else if (sev->ops && sev->ops->add) {
		ret = sev->ops->add(sev, elems);
		if (ret) {
			spin_lock_irqsave(&fh->vdev->fh_lock, flags);
			__v4l2_event_unsubscribe(sev);
			spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
			kvfree(sev);
		}
	}

	mutex_unlock(&fh->subscribe_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

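/*
 * Unsubscribe from every event the file handle is subscribed to, one
 * subscription at a time so the spinlock is never held across the call
 * to v4l2_event_unsubscribe().
 */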
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

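/*
 * Remove a single subscription (or all of them for V4L2_EVENT_ALL) and
 * free it after invoking the optional ops->del callback.
 */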
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL)
		__v4l2_event_unsubscribe(sev);

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	mutex_unlock(&fh->subscribe_lock);

	kvfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

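/*
 * Convenience wrapper for subdevs; simply forwards to
 * v4l2_event_unsubscribe(), ignoring the subdev argument.
 */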
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

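/*
 * Replace op for V4L2_EVENT_SOURCE_CHANGE: the new payload wins, but
 * the changes bitmask accumulates so no change notification is lost.
 */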
static void v4l2_event_src_replace(struct v4l2_event *old,
				const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

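/*
 * Merge op: fold the dropped (oldest) event's changes bitmask into the
 * next oldest event before the old one is overwritten.
 */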
static void v4l2_event_src_merge(const struct v4l2_event *old,
				struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};

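/*
 * Subscribe to V4L2_EVENT_SOURCE_CHANGE with the replace/merge ops
 * above, so queued source change events can be collapsed without
 * losing change bits. The elems argument of 0 is bumped to 1 by
 * v4l2_event_subscribe().
 */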
int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);