/*
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
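
/*
 * Each subscription keeps its events in sev->events, used as a ring
 * buffer: sev->first is the index of the oldest queued event and
 * sev->elems the ring size, so index lookups must wrap around.
 */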
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}

static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}
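
/*
 * Dequeue one event for VIDIOC_DQEVENT: a single attempt in nonblocking
 * mode, otherwise sleep on fh->wait until an event arrives or the wait
 * is interrupted by a signal.
 */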
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}
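
/*
 * Queue one event on a single file handle; called with fh->vdev->fh_lock
 * held. If the subscription's ring is already full, the oldest event is
 * dropped, with the replace/merge ops given a chance to fold its payload
 * into what remains.
 */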
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}
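
/*
 * Illustrative use only (the "foo" driver is hypothetical): broadcasting
 * an end-of-stream event to every open file handle of a video device:
 *
 *	static const struct v4l2_event ev = {
 *		.type = V4L2_EVENT_EOS,
 *	};
 *
 *	v4l2_event_queue(foo->vdev, &ev);
 */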
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	if (vdev == NULL)
		return;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, &timestamp);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);
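
/* Like v4l2_event_queue(), but targets a single file handle only. */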
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
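
/*
 * Typically consulted from a driver's poll() op; a nonzero return is
 * reported to userspace as POLLPRI readiness.
 */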
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
	struct v4l2_fh *fh = sev->fh;
	unsigned int i;

	lockdep_assert_held(&fh->subscribe_lock);
	assert_spin_locked(&fh->vdev->fh_lock);

	/* Remove any pending events for this subscription */
	for (i = 0; i < sev->in_use; i++) {
		list_del(&sev->events[sev_pos(sev, i)].list);
		fh->navailable--;
	}
	list_del(&sev->list);
}
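
/*
 * Sketch of a typical caller, a driver's .vidioc_subscribe_event handler
 * (hypothetical names; an event depth of 2 is chosen arbitrarily):
 *
 *	static int foo_subscribe_event(struct v4l2_fh *fh,
 *				const struct v4l2_event_subscription *sub)
 *	{
 *		switch (sub->type) {
 *		case V4L2_EVENT_EOS:
 *			return v4l2_event_subscribe(fh, sub, 2, NULL);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */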
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;
	int ret = 0;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;
	sev->elems = elems;

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		/* Already listening */
		kfree(sev);
	} else if (sev->ops && sev->ops->add) {
		ret = sev->ops->add(sev, elems);
		if (ret) {
			spin_lock_irqsave(&fh->vdev->fh_lock, flags);
			__v4l2_event_unsubscribe(sev);
			spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
			kfree(sev);
		}
	}

	mutex_unlock(&fh->subscribe_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
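
/*
 * Unsubscribe from every event on the file handle, one subscription at a
 * time: v4l2_event_unsubscribe() takes the locks itself and may sleep in
 * sev->ops->del, so it cannot be called with fh_lock held.
 */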
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL)
		__v4l2_event_unsubscribe(sev);

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	mutex_unlock(&fh->subscribe_lock);

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
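
/*
 * Helper for sub-devices: usable directly as the .unsubscribe_event core
 * op when no extra per-event cleanup is required.
 */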
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
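
/*
 * V4L2_EVENT_SOURCE_CHANGE carries a bitmask of changes. On ring overflow
 * the replace/merge ops below OR the masks together, so a change flag is
 * never lost; only its repeat count is.
 */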
static void v4l2_event_src_replace(struct v4l2_event *old,
				const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
				struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};

int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);
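
/*
 * Illustrative wiring (hypothetical "foo" driver): a receiver that only
 * generates source change events can route the event ioctls straight to
 * the helpers above:
 *
 *	static const struct v4l2_ioctl_ops foo_ioctl_ops = {
 *		...
 *		.vidioc_subscribe_event = v4l2_src_change_event_subscribe,
 *		.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
 *	};
 */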