Release 4.11 drivers/media/v4l2-core/v4l2-event.c
/*
* v4l2-event.c
*
* V4L2 events.
*
* Copyright (C) 2009--2010 Nokia Corporation.
*
* Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
idx += sev->first;
return idx >= sev->elems ? idx - sev->elems : idx;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hans Verkuil | 22 | 61.11% | 1 | 50.00% |
Sakari Ailus | 14 | 38.89% | 1 | 50.00% |
Total | 36 | 100.00% | 2 | 100.00% |
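Note: sev_pos() maps a logical offset into the per-subscription kevent ring. The events array is used as a circular buffer, with sev->first pointing at the oldest queued event and sev->elems giving the ring size; since idx never exceeds elems, a single conditional subtraction stands in for a modulo. For example, with elems == 4 and first == 3, sev_pos(sev, 2) returns (3 + 2) - 4 = 1.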
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
struct v4l2_kevent *kev;
unsigned long flags;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
if (list_empty(&fh->available)) {
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
return -ENOENT;
}
WARN_ON(fh->navailable == 0);
kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
list_del(&kev->list);
fh->navailable--;
kev->event.pending = fh->navailable;
*event = kev->event;
kev->sev->first = sev_pos(kev->sev, 1);
kev->sev->in_use--;
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sakari Ailus | 128 | 82.05% | 1 | 33.33% |
Hans Verkuil | 28 | 17.95% | 2 | 66.67% |
Total | 156 | 100.00% | 3 | 100.00% |
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
int nonblocking)
{
int ret;
if (nonblocking)
return __v4l2_event_dequeue(fh, event);
/* Release the vdev lock while waiting */
if (fh->vdev->lock)
mutex_unlock(fh->vdev->lock);
do {
ret = wait_event_interruptible(fh->wait,
fh->navailable != 0);
if (ret < 0)
break;
ret = __v4l2_event_dequeue(fh, event);
} while (ret == -ENOENT);
if (fh->vdev->lock)
mutex_lock(fh->vdev->lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sakari Ailus | 75 | 66.37% | 1 | 33.33% |
Hans Verkuil | 38 | 33.63% | 2 | 66.67% |
Total | 113 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
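For context, the VIDIOC_DQEVENT ioctl ends up here: the core handler in v4l2-ioctl.c essentially translates O_NONBLOCK on the file into the nonblocking argument. A rough sketch of such a caller (my_dqevent is a hypothetical stand-in, not the exact core code):

static int my_dqevent(struct file *file, struct v4l2_fh *fh,
		      struct v4l2_event *event)
{
	/* Sleep on fh->wait only if the file was opened without O_NONBLOCK. */
	return v4l2_event_dequeue(fh, event, file->f_flags & O_NONBLOCK);
}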
/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
struct v4l2_fh *fh, u32 type, u32 id)
{
struct v4l2_subscribed_event *sev;
assert_spin_locked(&fh->vdev->fh_lock);
list_for_each_entry(sev, &fh->subscribed, list)
if (sev->type == type && sev->id == id)
return sev;
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sakari Ailus | 52 | 83.87% | 2 | 50.00% |
Hans Verkuil | 10 | 16.13% | 2 | 50.00% |
Total | 62 | 100.00% | 4 | 100.00% |
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
const struct timespec *ts)
{
struct v4l2_subscribed_event *sev;
struct v4l2_kevent *kev;
bool copy_payload = true;
/* Are we subscribed? */
sev = v4l2_event_subscribed(fh, ev->type, ev->id);
if (sev == NULL)
return;
/*
* If the event has been added to the fh->subscribed list, but its
	 * add op has not completed yet, elems will be 0; treat this as
* not being subscribed.
*/
if (!sev->elems)
return;
/* Increase event sequence number on fh. */
fh->sequence++;
/* Do we have any free events? */
if (sev->in_use == sev->elems) {
/* no, remove the oldest one */
kev = sev->events + sev_pos(sev, 0);
list_del(&kev->list);
sev->in_use--;
sev->first = sev_pos(sev, 1);
fh->navailable--;
if (sev->elems == 1) {
if (sev->ops && sev->ops->replace) {
sev->ops->replace(&kev->event, ev);
copy_payload = false;
}
} else if (sev->ops && sev->ops->merge) {
struct v4l2_kevent *second_oldest =
sev->events + sev_pos(sev, 0);
sev->ops->merge(&kev->event, &second_oldest->event);
}
}
/* Take one and fill it. */
kev = sev->events + sev_pos(sev, sev->in_use);
kev->event.type = ev->type;
if (copy_payload)
kev->event.u = ev->u;
kev->event.id = ev->id;
kev->event.timestamp = *ts;
kev->event.sequence = fh->sequence;
sev->in_use++;
list_add_tail(&kev->list, &fh->available);
fh->navailable++;
wake_up_all(&fh->wait);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hans Verkuil | 191 | 59.32% | 4 | 66.67% |
Sakari Ailus | 106 | 32.92% | 1 | 16.67% |
Hans de Goede | 25 | 7.76% | 1 | 16.67% |
Total | 322 | 100.00% | 6 | 100.00% |
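Two details above are worth calling out: the elems check guards against the window where the subscription is already on fh->subscribed but its add op has not yet finished sizing the ring, and the full-ring branch drops the oldest event rather than the newest. For single-slot subscriptions the optional replace op can fold the dropped payload into the incoming event, while for larger rings the merge op folds it into the next-oldest entry; v4l2_event_src_replace() and v4l2_event_src_merge() near the bottom of this file are the in-tree users of these hooks.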
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
struct v4l2_fh *fh;
unsigned long flags;
struct timespec timestamp;
if (vdev == NULL)
return;
	ktime_get_ts(&timestamp);
spin_lock_irqsave(&vdev->fh_lock, flags);
list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, &timestamp);
spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hans Verkuil | 69 | 86.25% | 2 | 66.67% |
Sakari Ailus | 11 | 13.75% | 1 | 33.33% |
Total | 80 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(v4l2_event_queue);
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
unsigned long flags;
struct timespec timestamp;
	ktime_get_ts(&timestamp);
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hans Verkuil | 64 | 100.00% | 1 | 100.00% |
Total | 64 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
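A sketch of how a driver might raise an event through these two entry points (my_report_resolution_change and the choice of event are illustrative, not from this file): v4l2_event_queue() walks vdev->fh_list and delivers to every subscribed file handle, while v4l2_event_queue_fh() targets a single handle, e.g. for an event owned by one filehandle.

static void my_report_resolution_change(struct video_device *vdev)
{
	static const struct v4l2_event ev = {
		.type = V4L2_EVENT_SOURCE_CHANGE,
		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
	};

	/* Broadcast to all file handles subscribed to this event type. */
	v4l2_event_queue(vdev, &ev);
}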
int v4l2_event_pending(struct v4l2_fh *fh)
{
return fh->navailable;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sakari Ailus | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(v4l2_event_pending);
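v4l2_event_pending() is what a driver's poll() handler checks to report exceptional readiness. A minimal sketch (my_poll is hypothetical and assumes v4l2_fh_open() installed the v4l2_fh as file->private_data):

static unsigned int my_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	unsigned int res = 0;

	/* Register on the same waitqueue that __v4l2_event_queue_fh() wakes. */
	poll_wait(file, &fh->wait, wait);
	if (v4l2_event_pending(fh))
		res |= POLLPRI;
	return res;
}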
int v4l2_event_subscribe(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub, unsigned elems,
const struct v4l2_subscribed_event_ops *ops)
{
struct v4l2_subscribed_event *sev, *found_ev;
unsigned long flags;
unsigned i;
if (sub->type == V4L2_EVENT_ALL)
return -EINVAL;
if (elems < 1)
elems = 1;
sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
if (!sev)
return -ENOMEM;
for (i = 0; i < elems; i++)
sev->events[i].sev = sev;
sev->type = sub->type;
sev->id = sub->id;
sev->flags = sub->flags;
sev->fh = fh;
sev->ops = ops;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
if (!found_ev)
list_add(&sev->list, &fh->subscribed);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (found_ev) {
kfree(sev);
return 0; /* Already listening */
}
if (sev->ops && sev->ops->add) {
int ret = sev->ops->add(sev, elems);
if (ret) {
sev->ops = NULL;
v4l2_event_unsubscribe(fh, sub);
return ret;
}
}
/* Mark as ready for use */
sev->elems = elems;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hans Verkuil | 121 | 42.61% | 6 | 66.67% |
Sakari Ailus | 82 | 28.87% | 1 | 11.11% |
Hans de Goede | 81 | 28.52% | 2 | 22.22% |
Total | 284 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
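A driver normally exposes this helper through its vidioc_subscribe_event ioctl op, dispatching on the event type and letting the core size the per-subscription kevent ring. A sketch assuming a hypothetical my_subscribe_event (V4L2_EVENT_EOS, v4l2_ctrl_subscribe_event() and v4l2_src_change_event_subscribe() are standard V4L2 symbols):

static int my_subscribe_event(struct v4l2_fh *fh,
			      const struct v4l2_event_subscription *sub)
{
	switch (sub->type) {
	case V4L2_EVENT_EOS:
		/* Keep up to two queued EOS events, no custom ops. */
		return v4l2_event_subscribe(fh, sub, 2, NULL);
	case V4L2_EVENT_SOURCE_CHANGE:
		return v4l2_src_change_event_subscribe(fh, sub);
	default:
		return v4l2_ctrl_subscribe_event(fh, sub);
	}
}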
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
struct v4l2_event_subscription sub;
struct v4l2_subscribed_event *sev;
unsigned long flags;
do {
sev = NULL;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
if (!list_empty(&fh->subscribed)) {
sev = list_first_entry(&fh->subscribed,
struct v4l2_subscribed_event, list);
sub.type = sev->type;
sub.id = sev->id;
}
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (sev)
v4l2_event_unsubscribe(fh, &sub);
} while (sev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sakari Ailus | 87 | 75.65% | 1 | 33.33% |
Hans Verkuil | 28 | 24.35% | 2 | 66.67% |
Total | 115 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
struct v4l2_subscribed_event *sev;
unsigned long flags;
int i;
if (sub->type == V4L2_EVENT_ALL) {
v4l2_event_unsubscribe_all(fh);
return 0;
}
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
sev = v4l2_event_subscribed(fh, sub->type, sub->id);
if (sev != NULL) {
/* Remove any pending events for this subscription */
for (i = 0; i < sev->in_use; i++) {
list_del(&sev->events[sev_pos(sev, i)].list);
fh->navailable--;
}
list_del(&sev->list);
}
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (sev && sev->ops && sev->ops->del)
sev->ops->del(sev);
kfree(sev);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sakari Ailus | 104 | 60.12% | 1 | 16.67% |
Hans de Goede | 53 | 30.64% | 2 | 33.33% |
Hans Verkuil | 16 | 9.25% | 3 | 50.00% |
Total | 173 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
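The unsubscribe side needs no per-driver logic in the common case, so this helper is typically wired straight into the ioctl ops table. A sketch reusing the hypothetical my_subscribe_event from above:

static const struct v4l2_ioctl_ops my_ioctl_ops = {
	/* ... other ops ... */
	.vidioc_subscribe_event   = my_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};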
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
struct v4l2_event_subscription *sub)
{
return v4l2_event_unsubscribe(fh, sub);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sylwester Nawrocki | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
static void v4l2_event_src_replace(struct v4l2_event *old,
const struct v4l2_event *new)
{
u32 old_changes = old->u.src_change.changes;
old->u.src_change = new->u.src_change;
old->u.src_change.changes |= old_changes;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arun Kumar K | 50 | 100.00% | 1 | 100.00% |
Total | 50 | 100.00% | 1 | 100.00% |
static void v4l2_event_src_merge(const struct v4l2_event *old,
struct v4l2_event *new)
{
new->u.src_change.changes |= old->u.src_change.changes;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arun Kumar K | 33 | 100.00% | 1 | 100.00% |
Total | 33 | 100.00% | 1 | 100.00% |
static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
.replace = v4l2_event_src_replace,
.merge = v4l2_event_src_merge,
};
int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
return -EINVAL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arun Kumar K | 41 | 100.00% | 1 | 100.00% |
Total | 41 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);
int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
return v4l2_src_change_event_subscribe(fh, sub);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arun Kumar K | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);
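The two subdev wrappers above exist only to match the sub-device op signatures, which carry an extra struct v4l2_subdev argument. A sketch of how a sub-device driver would wire them (my_subdev_core_ops is hypothetical):

static const struct v4l2_subdev_core_ops my_subdev_core_ops = {
	.subscribe_event   = v4l2_src_change_event_subdev_subscribe,
	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};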
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sakari Ailus | 710 | 42.06% | 3 | 15.79% |
Hans Verkuil | 598 | 35.43% | 9 | 47.37% |
Arun Kumar K | 180 | 10.66% | 1 | 5.26% |
Hans de Goede | 159 | 9.42% | 3 | 15.79% |
Sylwester Nawrocki | 33 | 1.95% | 1 | 5.26% |
Laurent Pinchart | 5 | 0.30% | 1 | 5.26% |
Paul Gortmaker | 3 | 0.18% | 1 | 5.26% |
Total | 1688 | 100.00% | 19 | 100.00% |