// SPDX-License-Identifier: GPL-2.0-only
/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

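/*
 * Map a logical index in a subscription's event ring buffer to its
 * physical position: entries live in sev->events[], starting at
 * sev->first and wrapping around at sev->elems.
 */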
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
        idx += sev->first;
        return idx >= sev->elems ? idx - sev->elems : idx;
}

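/*
 * Dequeue the oldest available event into @event, or return -ENOENT if
 * none is pending. Takes fh->vdev->fh_lock itself, so the caller must
 * not hold it.
 */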
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
        struct v4l2_kevent *kev;
        struct timespec64 ts;
        unsigned long flags;

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);

        if (list_empty(&fh->available)) {
                spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
                return -ENOENT;
        }

        WARN_ON(fh->navailable == 0);

        kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
        list_del(&kev->list);
        fh->navailable--;

        kev->event.pending = fh->navailable;
        *event = kev->event;
        ts = ns_to_timespec64(kev->ts);
        event->timestamp.tv_sec = ts.tv_sec;
        event->timestamp.tv_nsec = ts.tv_nsec;
        kev->sev->first = sev_pos(kev->sev, 1);
        kev->sev->in_use--;

        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

        return 0;
}

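/*
 * Dequeue one event for @fh. In blocking mode the vdev lock (if any) is
 * dropped while sleeping so other file operations can make progress,
 * and the wait is interruptible: -ERESTARTSYS is returned if the wait
 * is interrupted by a signal.
 */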
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
                       int nonblocking)
{
        int ret;

        if (nonblocking)
                return __v4l2_event_dequeue(fh, event);

        /* Release the vdev lock while waiting */
        if (fh->vdev->lock)
                mutex_unlock(fh->vdev->lock);

        do {
                ret = wait_event_interruptible(fh->wait,
                                               fh->navailable != 0);
                if (ret < 0)
                        break;

                ret = __v4l2_event_dequeue(fh, event);
        } while (ret == -ENOENT);

        if (fh->vdev->lock)
                mutex_lock(fh->vdev->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
                struct v4l2_fh *fh, u32 type, u32 id)
{
        struct v4l2_subscribed_event *sev;

        assert_spin_locked(&fh->vdev->fh_lock);

        list_for_each_entry(sev, &fh->subscribed, list)
                if (sev->type == type && sev->id == id)
                        return sev;

        return NULL;
}

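/*
 * Queue @ev on a single file handle. If the subscription's ring buffer
 * is full, the oldest event is dropped to make room; subscriptions may
 * supply replace/merge ops so that information from dropped events is
 * folded into the ones that remain. Caller must hold fh->vdev->fh_lock.
 */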
static void __v4l2_event_queue_fh(struct v4l2_fh *fh,
                                  const struct v4l2_event *ev, u64 ts)
{
        struct v4l2_subscribed_event *sev;
        struct v4l2_kevent *kev;
        bool copy_payload = true;

        /* Are we subscribed? */
        sev = v4l2_event_subscribed(fh, ev->type, ev->id);
        if (sev == NULL)
                return;

        /* Increase event sequence number on fh. */
        fh->sequence++;

        /* Do we have any free events? */
        if (sev->in_use == sev->elems) {
                /* no, remove the oldest one */
                kev = sev->events + sev_pos(sev, 0);
                list_del(&kev->list);
                sev->in_use--;
                sev->first = sev_pos(sev, 1);
                fh->navailable--;
                if (sev->elems == 1) {
                        if (sev->ops && sev->ops->replace) {
                                sev->ops->replace(&kev->event, ev);
                                copy_payload = false;
                        }
                } else if (sev->ops && sev->ops->merge) {
                        struct v4l2_kevent *second_oldest =
                                sev->events + sev_pos(sev, 0);
                        sev->ops->merge(&kev->event, &second_oldest->event);
                }
        }

        /* Take one and fill it. */
        kev = sev->events + sev_pos(sev, sev->in_use);
        kev->event.type = ev->type;
        if (copy_payload)
                kev->event.u = ev->u;
        kev->event.id = ev->id;
        kev->ts = ts;
        kev->event.sequence = fh->sequence;
        sev->in_use++;
        list_add_tail(&kev->list, &fh->available);

        fh->navailable++;

        wake_up_all(&fh->wait);
}

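/*
 * Queue an event on every file handle currently open on @vdev. The
 * timestamp is taken once up front so all handles see the same value.
 *
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file), raising an end-of-stream event:
 *
 *	static const struct v4l2_event ev = {
 *		.type = V4L2_EVENT_EOS,
 *	};
 *	v4l2_event_queue(vdev, &ev);
 */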
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
        struct v4l2_fh *fh;
        unsigned long flags;
        u64 ts;

        if (vdev == NULL)
                return;

        ts = ktime_get_ns();

        spin_lock_irqsave(&vdev->fh_lock, flags);

        list_for_each_entry(fh, &vdev->fh_list, list)
                __v4l2_event_queue_fh(fh, ev, ts);

        spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

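/* As above, but queue the event on a single file handle only. */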
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
        unsigned long flags;
        u64 ts = ktime_get_ns();

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        __v4l2_event_queue_fh(fh, ev, ts);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

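/*
 * Return the number of events pending on @fh; drivers typically call
 * this from their poll() implementation.
 */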
int v4l2_event_pending(struct v4l2_fh *fh)
{
        return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

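/*
 * Drop a subscription along with any of its events still sitting on
 * the available list. Caller must hold both fh->subscribe_lock and
 * fh->vdev->fh_lock, and is responsible for freeing @sev afterwards.
 */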
static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
        struct v4l2_fh *fh = sev->fh;
        unsigned int i;

        lockdep_assert_held(&fh->subscribe_lock);
        assert_spin_locked(&fh->vdev->fh_lock);

        /* Remove any pending events for this subscription */
        for (i = 0; i < sev->in_use; i++) {
                list_del(&sev->events[sev_pos(sev, i)].list);
                fh->navailable--;
        }
        list_del(&sev->list);
}

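/*
 * Subscribe @fh to events of the given type and id. @elems sets the
 * size of the per-subscription ring buffer; at least one element is
 * always allocated. Subscribing twice to the same (type, id) pair is a
 * no-op, and if the subscription's add op fails the subscription is
 * rolled back and that error is returned.
 */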
int v4l2_event_subscribe(struct v4l2_fh *fh,
                         const struct v4l2_event_subscription *sub, unsigned elems,
                         const struct v4l2_subscribed_event_ops *ops)
{
        struct v4l2_subscribed_event *sev, *found_ev;
        unsigned long flags;
        unsigned i;
        int ret = 0;

        if (sub->type == V4L2_EVENT_ALL)
                return -EINVAL;

        if (elems < 1)
                elems = 1;

        sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
        if (!sev)
                return -ENOMEM;
        for (i = 0; i < elems; i++)
                sev->events[i].sev = sev;
        sev->type = sub->type;
        sev->id = sub->id;
        sev->flags = sub->flags;
        sev->fh = fh;
        sev->ops = ops;
        sev->elems = elems;

        mutex_lock(&fh->subscribe_lock);

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
        if (!found_ev)
                list_add(&sev->list, &fh->subscribed);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

        if (found_ev) {
                /* Already listening */
                kvfree(sev);
        } else if (sev->ops && sev->ops->add) {
                ret = sev->ops->add(sev, elems);
                if (ret) {
                        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
                        __v4l2_event_unsubscribe(sev);
                        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
                        kvfree(sev);
                }
        }

        mutex_unlock(&fh->subscribe_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

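/*
 * Remove every subscription from @fh, one at a time. The list head is
 * sampled under fh_lock, but each removal goes through the regular
 * v4l2_event_unsubscribe() path so the del op runs without the
 * spinlock held.
 */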
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
        struct v4l2_event_subscription sub;
        struct v4l2_subscribed_event *sev;
        unsigned long flags;

        do {
                sev = NULL;

                spin_lock_irqsave(&fh->vdev->fh_lock, flags);
                if (!list_empty(&fh->subscribed)) {
                        sev = list_first_entry(&fh->subscribed,
                                        struct v4l2_subscribed_event, list);
                        sub.type = sev->type;
                        sub.id = sev->id;
                }
                spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
                if (sev)
                        v4l2_event_unsubscribe(fh, &sub);
        } while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

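/*
 * Unsubscribe from a single event type, or from everything when
 * @sub->type is V4L2_EVENT_ALL. Unsubscribing from an event that was
 * never subscribed to is not an error.
 */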
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
                           const struct v4l2_event_subscription *sub)
{
        struct v4l2_subscribed_event *sev;
        unsigned long flags;

        if (sub->type == V4L2_EVENT_ALL) {
                v4l2_event_unsubscribe_all(fh);
                return 0;
        }

        mutex_lock(&fh->subscribe_lock);

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);

        sev = v4l2_event_subscribed(fh, sub->type, sub->id);
        if (sev != NULL)
                __v4l2_event_unsubscribe(sev);

        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

        if (sev && sev->ops && sev->ops->del)
                sev->ops->del(sev);

        mutex_unlock(&fh->subscribe_lock);

        kvfree(sev);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

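/* Subdev variant of v4l2_event_unsubscribe(); @sd itself is unused. */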
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
                                  struct v4l2_event_subscription *sub)
{
        return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

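/*
 * Replace/merge ops for V4L2_EVENT_SOURCE_CHANGE: when an event must be
 * dropped, its 'changes' bitmask is OR'ed into the surviving event so
 * that no change notification is lost.
 */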
static void v4l2_event_src_replace(struct v4l2_event *old,
                                const struct v4l2_event *new)
{
        u32 old_changes = old->u.src_change.changes;

        old->u.src_change = new->u.src_change;
        old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
                                struct v4l2_event *new)
{
        new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
        .replace = v4l2_event_src_replace,
        .merge = v4l2_event_src_merge,
};

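/*
 * Helper for drivers: subscribe to source change events with the
 * replace/merge ops above already wired up.
 */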
int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
                                const struct v4l2_event_subscription *sub)
{
        if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
                return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
                struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
        return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);