Release 4.7 drivers/media/rc/rc-ir-raw.c
/* rc-ir-raw.c - handle IR pulse/space events
 *
 * Copyright (C) 2010 by Mauro Carvalho Chehab
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include "rc-core-priv.h"
/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);
/* Used to handle IR raw handler extensions */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static u64 available_protocols;
static int ir_raw_event_thread(void *data)
{
        struct ir_raw_event ev;
        struct ir_raw_handler *handler;
        struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;

        while (!kthread_should_stop()) {

                spin_lock_irq(&raw->lock);

                if (!kfifo_len(&raw->kfifo)) {
                        set_current_state(TASK_INTERRUPTIBLE);

                        if (kthread_should_stop())
                                set_current_state(TASK_RUNNING);

                        spin_unlock_irq(&raw->lock);
                        schedule();
                        continue;
                }

                if (!kfifo_out(&raw->kfifo, &ev, 1))
                        dev_err(&raw->dev->dev, "IR event FIFO is empty!\n");
                spin_unlock_irq(&raw->lock);

                mutex_lock(&ir_raw_handler_lock);
                list_for_each_entry(handler, &ir_raw_handler_list, list)
                        if (raw->dev->enabled_protocols & handler->protocols ||
                            !handler->protocols)
                                handler->decode(raw->dev, ev);
                raw->prev_ev = ev;
                mutex_unlock(&ir_raw_handler_lock);
        }

        return 0;
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
| --- | ---: | ---: | ---: | ---: |
| maxim levitsky | 73 | 40.56% | 3 | 30.00% |
| david hardeman | 57 | 31.67% | 4 | 40.00% |
| heiner kallweit | 35 | 19.44% | 2 | 20.00% |
| srinivas kandagatla | 15 | 8.33% | 1 | 10.00% |
| Total | 180 | 100.00% | 10 | 100.00% |
/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev: the struct rc_dev device descriptor
 * @ev: the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
        if (!dev->raw)
                return -EINVAL;

        IR_dprintk(2, "sample: (%05dus %s)\n",
                   TO_US(ev->duration), TO_STR(ev->pulse));

        if (!kfifo_put(&dev->raw->kfifo, *ev)) {
                dev_err(&dev->dev, "IR event FIFO is full!\n");
                return -ENOSPC;
        }

        return 0;
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
| --- | ---: | ---: | ---: | ---: |
| mauro carvalho chehab | 33 | 40.24% | 2 | 28.57% |
| maxim levitsky | 20 | 24.39% | 1 | 14.29% |
| david hardeman | 15 | 18.29% | 3 | 42.86% |
| heiner kallweit | 14 | 17.07% | 1 | 14.29% |
| Total | 82 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL_GPL(ir_raw_event_store);
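A usage sketch (not part of this file): a receiver driver whose hardware measures one pulse/space duration per interrupt might feed samples as below. The foo_* names and the priv layout are hypothetical; only the rc-core calls are real, and a driver would normally follow a burst of stored samples with ir_raw_event_handle() (see further down).

/* Hypothetical ISR: the hardware latches one measured duration per IRQ. */
static irqreturn_t foo_ir_isr(int irq, void *dev_id)
{
        struct foo_ir_priv *priv = dev_id;              /* assumed driver-private data */
        DEFINE_IR_RAW_EVENT(ev);
        u32 sample = foo_ir_read_sample(priv);          /* assumed register read, in us */

        ev.pulse = !!(sample & FOO_IR_LEVEL_BIT);       /* assumed polarity bit */
        ev.duration = US_TO_NS(sample & FOO_IR_DURATION_MASK);
        ir_raw_event_store(priv->rcdev, &ev);
        return IRQ_HANDLED;
}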
/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev: the struct rc_dev device descriptor
 * @type: the type of the event that has occurred
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
{
        ktime_t now;
        s64 delta; /* ns */
        DEFINE_IR_RAW_EVENT(ev);
        int rc = 0;
        int delay;

        if (!dev->raw)
                return -EINVAL;

        now = ktime_get();
        delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
        delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);

        /* Check for a long duration since last event or if we're
         * being called for the first time, note that delta can't
         * possibly be negative.
         */
        if (delta > delay || !dev->raw->last_type)
                type |= IR_START_EVENT;
        else
                ev.duration = delta;

        if (type & IR_START_EVENT)
                ir_raw_event_reset(dev);
        else if (dev->raw->last_type & IR_SPACE) {
                ev.pulse = false;
                rc = ir_raw_event_store(dev, &ev);
        } else if (dev->raw->last_type & IR_PULSE) {
                ev.pulse = true;
                rc = ir_raw_event_store(dev, &ev);
        } else
                return 0;

        dev->raw->last_event = now;
        dev->raw->last_type = type;
        return rc;
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
| --- | ---: | ---: | ---: | ---: |
| david hardeman | 134 | 68.02% | 3 | 50.00% |
| mauro carvalho chehab | 45 | 22.84% | 2 | 33.33% |
| jarod wilson | 18 | 9.14% | 1 | 16.67% |
| Total | 197 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
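For contrast, a sketch of the edge-driven case, modelled on in-tree drivers such as gpio-ir-recv: the hardware raises one interrupt per level change and never reports durations, so the driver passes only the new line state and lets this function compute the elapsed time. The bar_* names are hypothetical, and an active-high receiver is assumed.

/* Hypothetical edge ISR: type names the state the line just entered;
 * ir_raw_event_store_edge() stores the duration of the state that ended. */
static irqreturn_t bar_ir_edge_isr(int irq, void *dev_id)
{
        struct bar_ir_priv *priv = dev_id;              /* assumed */
        int level = gpio_get_value(priv->gpio_nr);      /* assumed GPIO line */

        ir_raw_event_store_edge(priv->rcdev, level ? IR_PULSE : IR_SPACE);
        ir_raw_event_handle(priv->rcdev);
        return IRQ_HANDLED;
}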
/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev: the struct rc_dev device descriptor
 * @ev: the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) works
 * in a similar manner to ir_raw_event_store_edge().
 * This routine is intended for devices with limited internal buffers.
 * It automerges samples of the same type, and handles timeouts. Returns
 * non-zero if the event was added, and zero if the event was ignored due
 * to idle processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
        if (!dev->raw)
                return -EINVAL;

        /* Ignore spaces in idle mode */
        if (dev->idle && !ev->pulse)
                return 0;
        else if (dev->idle)
                ir_raw_event_set_idle(dev, false);

        if (!dev->raw->this_ev.duration)
                dev->raw->this_ev = *ev;
        else if (ev->pulse == dev->raw->this_ev.pulse)
                dev->raw->this_ev.duration += ev->duration;
        else {
                ir_raw_event_store(dev, &dev->raw->this_ev);
                dev->raw->this_ev = *ev;
        }

        /* Enter idle mode if necessary */
        if (!ev->pulse && dev->timeout &&
            dev->raw->this_ev.duration >= dev->timeout)
                ir_raw_event_set_idle(dev, true);

        return 1;
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
| --- | ---: | ---: | ---: | ---: |
| maxim levitsky | 134 | 83.75% | 2 | 50.00% |
| david hardeman | 25 | 15.62% | 1 | 25.00% |
| sean young | 1 | 0.62% | 1 | 25.00% |
| Total | 160 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
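A sketch of the intended caller: a device with a small hardware FIFO may split one long space across several reads, and the automerge above joins consecutive samples of the same polarity before they reach the decoders. The baz_* names and the sample encoding are hypothetical; for the idle logic to trigger, the driver must have set dev->timeout.

/* Hypothetical FIFO drain: each word holds a polarity bit plus a duration. */
static void baz_ir_drain_fifo(struct baz_ir_priv *priv)
{
        DEFINE_IR_RAW_EVENT(ev);
        u32 sample;

        while (baz_ir_fifo_count(priv)) {               /* assumed helpers */
                sample = baz_ir_fifo_pop(priv);
                ev.pulse = !!(sample & BAZ_IR_PULSE_BIT);
                ev.duration = US_TO_NS(sample & BAZ_IR_DURATION_MASK);
                ir_raw_event_store_with_filter(priv->rcdev, &ev);
        }
        ir_raw_event_handle(priv->rcdev);
}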
/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev: the struct rc_dev device descriptor
 * @idle: whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
        if (!dev->raw)
                return;

        IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");

        if (idle) {
                dev->raw->this_ev.timeout = true;
                ir_raw_event_store(dev, &dev->raw->this_ev);
                init_ir_raw_event(&dev->raw->this_ev);
        }

        if (dev->s_idle)
                dev->s_idle(dev, idle);

        dev->idle = idle;
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
| --- | ---: | ---: | ---: | ---: |
| maxim levitsky | 79 | 84.95% | 2 | 66.67% |
| david hardeman | 14 | 15.05% | 1 | 33.33% |
| Total | 93 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
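Drivers can hook the hardware side of this hint through the s_idle callback on struct rc_dev, which the function above invokes. A minimal sketch, with qux_* as placeholder hardware helpers:

/* Hypothetical s_idle hook: gate the sampling engine while rc-core is idle. */
static void qux_ir_s_idle(struct rc_dev *rcdev, bool enable)
{
        struct qux_ir_priv *priv = rcdev->priv;         /* assumed */

        if (enable)
                qux_ir_stop_sampling(priv);             /* assumed register writes */
        else
                qux_ir_start_sampling(priv);
}

/* at probe time the driver would set: rcdev->s_idle = qux_ir_s_idle; */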
/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev: the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
        unsigned long flags;

        if (!dev->raw)
                return;

        spin_lock_irqsave(&dev->raw->lock, flags);
        wake_up_process(dev->raw->thread);
        spin_unlock_irqrestore(&dev->raw->lock, flags);
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
| --- | ---: | ---: | ---: | ---: |
| maxim levitsky | 28 | 50.91% | 2 | 33.33% |
| david hardeman | 24 | 43.64% | 2 | 33.33% |
| mauro carvalho chehab | 3 | 5.45% | 2 | 33.33% |
| Total | 55 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
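A sketch of the typical pairing: store a terminating event, then call ir_raw_event_handle() to wake the rc%u kthread shown earlier. The foo_* names continue the hypothetical driver from the ir_raw_event_store() example.

/* Hypothetical hardware-timeout path: flush the trailing space and decode. */
static void foo_ir_timeout_irq(struct foo_ir_priv *priv)
{
        DEFINE_IR_RAW_EVENT(ev);

        ev.timeout = true;
        ev.duration = priv->rcdev->timeout;     /* driver-configured timeout, in ns */
        ir_raw_event_store(priv->rcdev, &ev);
        ir_raw_event_handle(priv->rcdev);       /* wake the decoder thread */
}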
/* used internally by the sysfs interface */
u64
ir_raw_get_allowed_protocols(void)
{
        u64 protocols;

        mutex_lock(&ir_raw_handler_lock);
        protocols = available_protocols;
        mutex_unlock(&ir_raw_handler_lock);
        return protocols;
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
| --- | ---: | ---: | ---: | ---: |
| david hardeman | 24 | 82.76% | 1 | 33.33% |
| randy dunlap | 3 | 10.34% | 1 | 33.33% |
| maxim levitsky | 2 | 6.90% | 1 | 33.33% |
| Total | 29 | 100.00% | 3 | 100.00% |
static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{
        /* the caller will update dev->enabled_protocols */
        return 0;
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
| --- | ---: | ---: | ---: | ---: |
| david hardeman | 19 | 100.00% | 1 | 100.00% |
| Total | 19 | 100.00% | 1 | 100.00% |
static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
        mutex_lock(&dev->lock);
        dev->enabled_protocols &= ~protocols;
        dev->enabled_wakeup_protocols &= ~protocols;
        mutex_unlock(&dev->lock);
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
| --- | ---: | ---: | ---: | ---: |
| heiner kallweit | 44 | 100.00% | 1 | 100.00% |
| Total | 44 | 100.00% | 1 | 100.00% |
/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_register(struct rc_dev *dev)
{
        int rc;
        struct ir_raw_handler *handler;

        if (!dev)
                return -EINVAL;

        dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
        if (!dev->raw)
                return -ENOMEM;

        dev->raw->dev = dev;
        dev->change_protocol = change_protocol;
        INIT_KFIFO(dev->raw->kfifo);

        spin_lock_init(&dev->raw->lock);
        dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
                                       "rc%u", dev->minor);

        if (IS_ERR(dev->raw->thread)) {
                rc = PTR_ERR(dev->raw->thread);
                goto out;
        }

        mutex_lock(&ir_raw_handler_lock);
        list_add_tail(&dev->raw->list, &ir_raw_client_list);
        list_for_each_entry(handler, &ir_raw_handler_list, list)
                if (handler->raw_register)
                        handler->raw_register(dev);
        mutex_unlock(&ir_raw_handler_lock);

        return 0;

out:
        kfree(dev->raw);
        dev->raw = NULL;
        return rc;
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
| --- | ---: | ---: | ---: | ---: |
| david hardeman | 155 | 76.35% | 6 | 54.55% |
| maxim levitsky | 40 | 19.70% | 3 | 27.27% |
| dan carpenter | 7 | 3.45% | 1 | 9.09% |
| heiner kallweit | 1 | 0.49% | 1 | 9.09% |
| Total | 203 | 100.00% | 11 | 100.00% |
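Note that drivers do not call ir_raw_event_register() themselves: rc_register_device() invokes it when driver_type is RC_DRIVER_IR_RAW. A probe-time sketch with hypothetical foo_* naming, using only rc-core calls present in this release:

static int foo_ir_probe(struct platform_device *pdev)
{
        struct rc_dev *rcdev;
        int ret;

        rcdev = rc_allocate_device();
        if (!rcdev)
                return -ENOMEM;

        rcdev->driver_type = RC_DRIVER_IR_RAW;
        rcdev->allowed_protocols = RC_BIT_ALL;  /* let any raw decoder bind */
        rcdev->input_name = "Foo IR receiver";  /* hypothetical */
        rcdev->map_name = RC_MAP_EMPTY;

        ret = rc_register_device(rcdev);        /* calls ir_raw_event_register() */
        if (ret)
                rc_free_device(rcdev);
        return ret;
}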
void ir_raw_event_unregister(struct rc_dev *dev)
{
        struct ir_raw_handler *handler;

        if (!dev || !dev->raw)
                return;

        kthread_stop(dev->raw->thread);

        mutex_lock(&ir_raw_handler_lock);
        list_del(&dev->raw->list);
        list_for_each_entry(handler, &ir_raw_handler_list, list)
                if (handler->raw_unregister)
                        handler->raw_unregister(dev);
        mutex_unlock(&ir_raw_handler_lock);

        kfree(dev->raw);
        dev->raw = NULL;
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
| --- | ---: | ---: | ---: | ---: |
| david hardeman | 87 | 95.60% | 3 | 60.00% |
| maxim levitsky | 4 | 4.40% | 2 | 40.00% |
| Total | 91 | 100.00% | 5 | 100.00% |
/*
 * Extension interface - used to register the IR decoders
 */
int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
        struct ir_raw_event_ctrl *raw;

        mutex_lock(&ir_raw_handler_lock);
        list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
        if (ir_raw_handler->raw_register)
                list_for_each_entry(raw, &ir_raw_client_list, list)
                        ir_raw_handler->raw_register(raw->dev);
        available_protocols |= ir_raw_handler->protocols;
        mutex_unlock(&ir_raw_handler_lock);

        return 0;
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
| --- | ---: | ---: | ---: | ---: |
| david hardeman | 40 | 57.14% | 3 | 60.00% |
| mauro carvalho chehab | 28 | 40.00% | 1 | 20.00% |
| maxim levitsky | 2 | 2.86% | 1 | 20.00% |
| Total | 70 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(ir_raw_handler_register);
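The registration pattern used by in-tree protocol decoders (for example ir-nec-decoder.c) looks like the sketch below; ir_foo_decode and foo_handler are placeholders, and a real decoder would claim its own RC_BIT_* protocol and run a state machine that ends in rc_keydown().

/* Hypothetical decoder module pairing register/unregister at init/exit. */
static int ir_foo_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
        /* consume one pulse/space sample of the ongoing transmission */
        return 0;
}

static struct ir_raw_handler foo_handler = {
        .protocols      = RC_BIT_OTHER,         /* real decoders use their own bit */
        .decode         = ir_foo_decode,
};

static int __init ir_foo_decode_init(void)
{
        return ir_raw_handler_register(&foo_handler);
}

static void __exit ir_foo_decode_exit(void)
{
        ir_raw_handler_unregister(&foo_handler);
}

module_init(ir_foo_decode_init);
module_exit(ir_foo_decode_exit);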
void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
        struct ir_raw_event_ctrl *raw;
        u64 protocols = ir_raw_handler->protocols;

        mutex_lock(&ir_raw_handler_lock);
        list_del(&ir_raw_handler->list);
        list_for_each_entry(raw, &ir_raw_client_list, list) {
                ir_raw_disable_protocols(raw->dev, protocols);
                if (ir_raw_handler->raw_unregister)
                        ir_raw_handler->raw_unregister(raw->dev);
        }
        available_protocols &= ~protocols;
        mutex_unlock(&ir_raw_handler_lock);
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
| --- | ---: | ---: | ---: | ---: |
| mauro carvalho chehab | 28 | 34.57% | 1 | 16.67% |
| david hardeman | 27 | 33.33% | 3 | 50.00% |
| heiner kallweit | 24 | 29.63% | 1 | 16.67% |
| maxim levitsky | 2 | 2.47% | 1 | 16.67% |
| Total | 81 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(ir_raw_handler_unregister);
Overall Contributors

| Person | Tokens | Token % | Commits | Commit % |
| --- | ---: | ---: | ---: | ---: |
| david hardeman | 647 | 46.45% | 9 | 26.47% |
| maxim levitsky | 400 | 28.72% | 6 | 17.65% |
| mauro carvalho chehab | 177 | 12.71% | 9 | 26.47% |
| heiner kallweit | 118 | 8.47% | 3 | 8.82% |
| jarod wilson | 18 | 1.29% | 1 | 2.94% |
| srinivas kandagatla | 15 | 1.08% | 1 | 2.94% |
| dan carpenter | 7 | 0.50% | 1 | 2.94% |
| stephen rothwell | 3 | 0.22% | 1 | 2.94% |
| randy dunlap | 3 | 0.22% | 1 | 2.94% |
| paul gortmaker | 3 | 0.22% | 1 | 2.94% |
| sean young | 2 | 0.14% | 1 | 2.94% |
| Total | 1393 | 100.00% | 34 | 100.00% |