Release 4.11: drivers/media/dvb-core/dvb_ringbuffer.c
/*
*
* dvb_ringbuffer.c: ring buffer implementation for the dvb driver
*
* Copyright (C) 2003 Oliver Endriss
* Copyright (C) 2004 Andrew de Quincey
*
* based on code originally found in av7110.c & dvb_ci.c:
* Copyright (C) 1999-2003 Ralph Metzler
* & Marcus Metzler for convergence integrated media GmbH
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include "dvb_ringbuffer.h"
#define PKT_READY 0
#define PKT_DISPOSED 1
void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len)
{
	rbuf->pread = rbuf->pwrite = 0;
	rbuf->data = data;
	rbuf->size = len;
	rbuf->error = 0;
	init_waitqueue_head(&rbuf->queue);
	spin_lock_init(&rbuf->lock);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Alan Cox | 55 | 87.30% | 1 | 33.33% |
Andreas Oberritter | 6 | 9.52% | 1 | 33.33% |
Michael Hunold | 2 | 3.17% | 1 | 33.33% |
Total | 63 | 100.00% | 3 | 100.00% |
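dvb_ringbuffer_init() is the only setup step: the caller owns the backing storage, and the ring merely tracks two offsets into it. A minimal usage sketch (the embedding structure, buffer size, and function names below are illustrative, not part of this file):

/* Illustrative sketch: a driver-private structure embedding a ring buffer.
 * The ring does not allocate memory; the caller's storage backs it.
 */
struct my_demux_priv {
	struct dvb_ringbuffer rb;
	u8 storage[188 * 1024];		/* e.g. room for 1024 TS packets */
};

static void my_demux_setup(struct my_demux_priv *priv)
{
	/* After this call the ring is empty: pread == pwrite == 0. */
	dvb_ringbuffer_init(&priv->rb, priv->storage, sizeof(priv->storage));
}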
int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf)
{
/* smp_load_acquire() to load write pointer on reader side
* this pairs with smp_store_release() in dvb_ringbuffer_write(),
* dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset()
*
* for memory barriers also see Documentation/circular-buffers.txt
*/
return (rbuf->pread == smp_load_acquire(&rbuf->pwrite));
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Alan Cox | 19 | 73.08% | 1 | 33.33% |
Soeren Moch | 5 | 19.23% | 1 | 33.33% |
Michael Hunold | 2 | 7.69% | 1 | 33.33% |
Total | 26 | 100.00% | 3 | 100.00% |
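The acquire/release pairing documented in the comment is what makes the single-producer/single-consumer case safe without locks: once the reader's acquire load observes the new write pointer, every data store the producer made before its release store is guaranteed visible as well. Schematically (a sketch, not code from this file):

/* Producer (dvb_ringbuffer_write)        Consumer
 *
 *   memcpy(data + pwrite, src, n);       if (!dvb_ringbuffer_empty(rbuf)) {
 *   smp_store_release(&pwrite, new); -->     ... the acquire load saw the
 *                                            new pwrite, so the memcpy
 *                                            above is visible here ...
 *                                            dvb_ringbuffer_read(rbuf, ...);
 *                                        }
 */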
ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf)
{
ssize_t free;
/* ACCESS_ONCE() to load read pointer on writer side
* this pairs with smp_store_release() in dvb_ringbuffer_read(),
* dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(),
* or dvb_ringbuffer_reset()
*/
free = ACCESS_ONCE(rbuf->pread) - rbuf->pwrite;
if (free <= 0)
free += rbuf->size;
	return free - 1;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Alan Cox | 38 | 86.36% | 1 | 33.33% |
Soeren Moch | 4 | 9.09% | 1 | 33.33% |
Michael Hunold | 2 | 4.55% | 1 | 33.33% |
Total | 44 | 100.00% | 3 | 100.00% |
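The final `- 1` is what distinguishes a full ring from an empty one: if all `size` bytes could be filled, `pread == pwrite` would be ambiguous, so one byte is always kept unused. A worked example with `size = 8`:

/* Worked example (size = 8):
 *   pread = 2, pwrite = 6:  free = 2 - 6 = -4, + 8 = 4, - 1 = 3
 *   pread = 2, pwrite = 2:  free = 0, + 8 = 8, - 1 = 7   (empty ring)
 * The writer can therefore store at most size - 1 bytes; a completely
 * full ring would make pread == pwrite look identical to an empty one.
 */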
ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf)
{
ssize_t avail;
/* smp_load_acquire() to load write pointer on reader side
* this pairs with smp_store_release() in dvb_ringbuffer_write(),
* dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset()
*/
avail = smp_load_acquire(&rbuf->pwrite) - rbuf->pread;
if (avail < 0)
avail += rbuf->size;
return avail;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Alan Cox | 36 | 83.72% | 1 | 33.33% |
Soeren Moch | 5 | 11.63% | 1 | 33.33% |
Michael Hunold | 2 | 4.65% | 1 | 33.33% |
Total | 43 | 100.00% | 3 | 100.00% |
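dvb_ringbuffer_avail() is the reader-side mirror of dvb_ringbuffer_free(): for any consistent snapshot of the two pointers, avail + free == size - 1. Continuing the size = 8 example:

/* pread = 2, pwrite = 6:  avail = 6 - 2 = 4       (free was 3; 4 + 3 = 7)
 * pread = 6, pwrite = 2:  avail = 2 - 6 = -4, + 8 = 4
 */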
void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf)
{
/* dvb_ringbuffer_flush() counts as read operation
* smp_load_acquire() to load write pointer
* smp_store_release() to update read pointer, this ensures that the
* correct pointer is visible for subsequent dvb_ringbuffer_free()
* calls on other cpu cores
*/
smp_store_release(&rbuf->pread, smp_load_acquire(&rbuf->pwrite));
rbuf->error = 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Alan Cox | 15 | 45.45% | 1 | 25.00% |
Soeren Moch | 10 | 30.30% | 1 | 25.00% |
Andreas Oberritter | 6 | 18.18% | 1 | 25.00% |
Michael Hunold | 2 | 6.06% | 1 | 25.00% |
Total | 33 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(dvb_ringbuffer_flush);
void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf)
{
/* dvb_ringbuffer_reset() counts as read and write operation
* smp_store_release() to update read pointer
*/
smp_store_release(&rbuf->pread, 0);
/* smp_store_release() to update write pointer */
smp_store_release(&rbuf->pwrite, 0);
rbuf->error = 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrea Odetti | 24 | 63.16% | 1 | 50.00% |
Soeren Moch | 14 | 36.84% | 1 | 50.00% |
Total | 38 | 100.00% | 2 | 100.00% |
void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf)
{
unsigned long flags;
spin_lock_irqsave(&rbuf->lock, flags);
dvb_ringbuffer_flush(rbuf);
spin_unlock_irqrestore(&rbuf->lock, flags);
wake_up(&rbuf->queue);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Alan Cox | 45 | 95.74% | 1 | 50.00% |
Michael Hunold | 2 | 4.26% | 1 | 50.00% |
Total | 47 | 100.00% | 2 | 100.00% |
ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, size_t len)
{
size_t todo = len;
size_t split;
split = (rbuf->pread + len > rbuf->size) ? rbuf->size - rbuf->pread : 0;
if (split > 0) {
if (copy_to_user(buf, rbuf->data+rbuf->pread, split))
return -EFAULT;
buf += split;
todo -= split;
/* smp_store_release() for read pointer update to ensure
* that buf is not overwritten until read is complete,
* this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
*/
smp_store_release(&rbuf->pread, 0);
}
if (copy_to_user(buf, rbuf->data+rbuf->pread, todo))
return -EFAULT;
/* smp_store_release() to update read pointer, see above */
smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size);
return len;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Alan Cox | 89 | 62.24% | 1 | 25.00% |
Al Viro | 40 | 27.97% | 1 | 25.00% |
Soeren Moch | 12 | 8.39% | 1 | 25.00% |
Michael Hunold | 2 | 1.40% | 1 | 25.00% |
Total | 143 | 100.00% | 4 | 100.00% |
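The wait queue initialized in dvb_ringbuffer_init() supports the usual blocking-read pattern on top of this function. A hedged sketch of a consumer (the function name is hypothetical; only the dvb_ringbuffer_* calls are from this API):

/* Hypothetical blocking read built on this API; locking against a
 * concurrent flush/reset is omitted for brevity.
 */
static ssize_t my_read(struct dvb_ringbuffer *rb, u8 __user *buf, size_t len)
{
	ssize_t avail;

	/* Sleep until the producer has published at least one byte. */
	if (wait_event_interruptible(rb->queue, !dvb_ringbuffer_empty(rb)))
		return -ERESTARTSYS;

	avail = dvb_ringbuffer_avail(rb);
	if (len > (size_t)avail)
		len = avail;

	return dvb_ringbuffer_read_user(rb, buf, len);
}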
void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len)
{
size_t todo = len;
size_t split;
split = (rbuf->pread + len > rbuf->size) ? rbuf->size - rbuf->pread : 0;
if (split > 0) {
memcpy(buf, rbuf->data+rbuf->pread, split);
buf += split;
todo -= split;
/* smp_store_release() for read pointer update to ensure
* that buf is not overwritten until read is complete,
* this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
*/
smp_store_release(&rbuf->pread, 0);
}
memcpy(buf, rbuf->data+rbuf->pread, todo);
/* smp_store_release() to update read pointer, see above */
smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Al Viro | 62 | 48.82% | 1 | 25.00% |
Alan Cox | 52 | 40.94% | 1 | 25.00% |
Soeren Moch | 12 | 9.45% | 1 | 25.00% |
Andrew Morton | 1 | 0.79% | 1 | 25.00% |
Total | 127 | 100.00% | 4 | 100.00% |
ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t len)
{
size_t todo = len;
size_t split;
split = (rbuf->pwrite + len > rbuf->size) ? rbuf->size - rbuf->pwrite : 0;
if (split > 0) {
memcpy(rbuf->data+rbuf->pwrite, buf, split);
buf += split;
todo -= split;
/* smp_store_release() for write pointer update to ensure that
* written data is visible on other cpu cores before the pointer
* update, this pairs with smp_load_acquire() in
* dvb_ringbuffer_empty() or dvb_ringbuffer_avail()
*/
smp_store_release(&rbuf->pwrite, 0);
}
memcpy(rbuf->data+rbuf->pwrite, buf, todo);
/* smp_store_release() for write pointer update, see above */
smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size);
return len;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Alan Cox | 116 | 88.55% | 1 | 25.00% |
Soeren Moch | 12 | 9.16% | 1 | 25.00% |
Michael Hunold | 2 | 1.53% | 1 | 25.00% |
Andrew Morton | 1 | 0.76% | 1 | 25.00% |
Total | 131 | 100.00% | 4 | 100.00% |
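On the producer side, note that dvb_ringbuffer_write() performs no bounds check of its own; callers are expected to check dvb_ringbuffer_free() first, handle overruns, and then wake any sleeping reader. A hypothetical producer sketch (the function name and the -EOVERFLOW policy are illustrative, not from this file):

/* Hypothetical producer: drop data on overrun, record the error,
 * and wake the reader after a successful write.
 */
static void my_feed(struct dvb_ringbuffer *rb, const u8 *data, size_t len)
{
	if (dvb_ringbuffer_free(rb) < (ssize_t)len) {
		rb->error = -EOVERFLOW;		/* reader sees this later */
		return;
	}
	dvb_ringbuffer_write(rb, data, len);
	wake_up(&rb->queue);
}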
ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
const u8 __user *buf, size_t len)
{
int status;
size_t todo = len;
size_t split;
split = (rbuf->pwrite + len > rbuf->size) ? rbuf->size - rbuf->pwrite : 0;
if (split > 0) {
status = copy_from_user(rbuf->data+rbuf->pwrite, buf, split);
if (status)
return len - todo;
buf += split;
todo -= split;
/* smp_store_release() for write pointer update to ensure that
* written data is visible on other cpu cores before the pointer
* update, this pairs with smp_load_acquire() in
* dvb_ringbuffer_empty() or dvb_ringbuffer_avail()
*/
smp_store_release(&rbuf->pwrite, 0);
}
status = copy_from_user(rbuf->data+rbuf->pwrite, buf, todo);
if (status)
return len - todo;
/* smp_store_release() for write pointer update, see above */
smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size);
return len;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Mauro Carvalho Chehab | 145 | 92.36% | 1 | 50.00% |
Soeren Moch | 12 | 7.64% | 1 | 50.00% |
Total | 157 | 100.00% | 2 | 100.00% |
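Unlike dvb_ringbuffer_read_user(), which fails outright with -EFAULT, this function returns a short count when copy_from_user() faults: len - todo is the number of bytes already committed to the ring, because the wrapped (split) portion's write pointer is published before the second copy begins. For example:

/* Short-count arithmetic, assuming len = 10 and split = 6:
 *   first copy faults:   todo == 10  ->  returns 10 - 10 = 0
 *   second copy faults:  todo == 4   ->  returns 10 - 4  = 6
 * In the second case the six wrapped bytes were already made visible
 * by the smp_store_release() in the split branch.
 */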
ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len)
{
int status;
ssize_t oldpwrite = rbuf->pwrite;
DVB_RINGBUFFER_WRITE_BYTE(rbuf, len >> 8);
DVB_RINGBUFFER_WRITE_BYTE(rbuf, len & 0xff);
DVB_RINGBUFFER_WRITE_BYTE(rbuf, PKT_READY);
status = dvb_ringbuffer_write(rbuf, buf, len);
	if (status < 0)
		rbuf->pwrite = oldpwrite;
return status;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Michael Hunold | 78 | 100.00% | 1 | 100.00% |
Total | 78 | 100.00% | 1 | 100.00% |
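dvb_ringbuffer_pkt_write() frames each packet with a DVB_RINGBUFFER_PKTHDRSIZE (three-byte) header: a big-endian 16-bit payload length followed by a status byte, initially PKT_READY. A 5-byte payload therefore occupies 8 bytes of ring space:

/* On-ring layout after dvb_ringbuffer_pkt_write(rbuf, buf, 5):
 *
 *   +0: len >> 8    (0x00)  \
 *   +1: len & 0xff  (0x05)   >  3-byte header (DVB_RINGBUFFER_PKTHDRSIZE)
 *   +2: PKT_READY   (0x00)  /
 *   +3 .. +7: the five payload bytes
 */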
ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, size_t idx,
int offset, u8 __user *buf, size_t len)
{
size_t todo;
size_t split;
size_t pktlen;
pktlen = rbuf->data[idx] << 8;
pktlen |= rbuf->data[(idx + 1) % rbuf->size];
	if (offset > pktlen)
		return -EINVAL;
	if ((offset + len) > pktlen)
		len = pktlen - offset;
idx = (idx + DVB_RINGBUFFER_PKTHDRSIZE + offset) % rbuf->size;
todo = len;
split = ((idx + len) > rbuf->size) ? rbuf->size - idx : 0;
if (split > 0) {
if (copy_to_user(buf, rbuf->data+idx, split))
return -EFAULT;
buf += split;
todo -= split;
idx = 0;
}
if (copy_to_user(buf, rbuf->data+idx, todo))
return -EFAULT;
return len;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Michael Hunold | 163 | 86.70% | 1 | 50.00% |
Al Viro | 25 | 13.30% | 1 | 50.00% |
Total | 188 | 100.00% | 2 | 100.00% |
ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
int offset, u8 *buf, size_t len)
{
size_t todo;
size_t split;
size_t pktlen;
pktlen = rbuf->data[idx] << 8;
pktlen |= rbuf->data[(idx + 1) % rbuf->size];
	if (offset > pktlen)
		return -EINVAL;
	if ((offset + len) > pktlen)
		len = pktlen - offset;
idx = (idx + DVB_RINGBUFFER_PKTHDRSIZE + offset) % rbuf->size;
todo = len;
split = ((idx + len) > rbuf->size) ? rbuf->size - idx : 0;
if (split > 0) {
memcpy(buf, rbuf->data+idx, split);
buf += split;
todo -= split;
idx = 0;
}
memcpy(buf, rbuf->data+idx, todo);
return len;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Al Viro | 137 | 78.29% | 1 | 50.00% |
Michael Hunold | 38 | 21.71% | 1 | 50.00% |
Total | 175 | 100.00% | 2 | 100.00% |
void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx)
{
size_t pktlen;
rbuf->data[(idx + 2) % rbuf->size] = PKT_DISPOSED;
	/* clean up disposed packets */
	while (dvb_ringbuffer_avail(rbuf) > DVB_RINGBUFFER_PKTHDRSIZE) {
if (DVB_RINGBUFFER_PEEK(rbuf, 2) == PKT_DISPOSED) {
pktlen = DVB_RINGBUFFER_PEEK(rbuf, 0) << 8;
pktlen |= DVB_RINGBUFFER_PEEK(rbuf, 1);
DVB_RINGBUFFER_SKIP(rbuf, pktlen + DVB_RINGBUFFER_PKTHDRSIZE);
} else {
			/* first packet is not disposed, so we stop cleaning now */
break;
}
}
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Michael Hunold | 92 | 100.00% | 1 | 100.00% |
Total | 92 | 100.00% | 1 | 100.00% |
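Disposal is lazy: the status byte is flipped immediately, but ring space is only reclaimed from the head of the buffer, so a packet disposed of out of order stays resident until everything ahead of it has also been disposed. For example:

/* Ring holds packets A, B, C in write order:
 *   dispose(B):  B marked PKT_DISPOSED, nothing reclaimed (A still READY)
 *   dispose(A):  A reclaimed, then B reclaimed as well; pread now points
 *                at C's header
 */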
ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t *pktlen)
{
int consumed;
int curpktlen;
int curpktstatus;
if (idx == -1) {
idx = rbuf->pread;
} else {
curpktlen = rbuf->data[idx] << 8;
curpktlen |= rbuf->data[(idx + 1) % rbuf->size];
idx = (idx + curpktlen + DVB_RINGBUFFER_PKTHDRSIZE) % rbuf->size;
}
consumed = (idx - rbuf->pread) % rbuf->size;
	while ((dvb_ringbuffer_avail(rbuf) - consumed) > DVB_RINGBUFFER_PKTHDRSIZE) {
curpktlen = rbuf->data[idx] << 8;
curpktlen |= rbuf->data[(idx + 1) % rbuf->size];
curpktstatus = rbuf->data[(idx + 2) % rbuf->size];
if (curpktstatus == PKT_READY) {
*pktlen = curpktlen;
return idx;
}
consumed += curpktlen + DVB_RINGBUFFER_PKTHDRSIZE;
idx = (idx + curpktlen + DVB_RINGBUFFER_PKTHDRSIZE) % rbuf->size;
}
	/* no packets available */
return -1;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Michael Hunold | 201 | 100.00% | 1 | 100.00% |
Total | 201 | 100.00% | 1 | 100.00% |
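The three packet functions are designed to be used together: dvb_ringbuffer_pkt_next() locates the next PKT_READY packet, dvb_ringbuffer_pkt_read() copies it out without consuming it, and dvb_ringbuffer_pkt_dispose() releases it. A hedged consumer sketch, mirroring the pattern used by dvb_ca_en50221.c (the function name and the 256-byte bound are illustrative):

/* Hypothetical consumer loop draining all complete packets. */
static void my_drain_packets(struct dvb_ringbuffer *rb)
{
	u8 pkt[256];
	size_t pktlen;
	ssize_t idx, next;

	idx = dvb_ringbuffer_pkt_next(rb, -1, &pktlen);
	while (idx != -1) {
		size_t n = min(pktlen, sizeof(pkt));

		dvb_ringbuffer_pkt_read(rb, idx, 0, pkt, n);
		/* ... process n payload bytes ... */

		/* Look up the following packet before disposing of this
		 * one, since disposal may advance pread past idx.
		 */
		next = dvb_ringbuffer_pkt_next(rb, idx, &pktlen);
		dvb_ringbuffer_pkt_dispose(rb, idx);
		idx = next;
	}
}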
EXPORT_SYMBOL(dvb_ringbuffer_init);
EXPORT_SYMBOL(dvb_ringbuffer_empty);
EXPORT_SYMBOL(dvb_ringbuffer_free);
EXPORT_SYMBOL(dvb_ringbuffer_avail);
EXPORT_SYMBOL(dvb_ringbuffer_flush_spinlock_wakeup);
EXPORT_SYMBOL(dvb_ringbuffer_read_user);
EXPORT_SYMBOL(dvb_ringbuffer_read);
EXPORT_SYMBOL(dvb_ringbuffer_write);
EXPORT_SYMBOL(dvb_ringbuffer_write_user);
Overall Contributors
Person | Tokens | Token % | Commits | Commit % |
Michael Hunold | 606 | 36.37% | 3 | 21.43% |
Alan Cox | 503 | 30.19% | 1 | 7.14% |
Al Viro | 269 | 16.15% | 1 | 7.14% |
Mauro Carvalho Chehab | 150 | 9.00% | 1 | 7.14% |
Soeren Moch | 86 | 5.16% | 1 | 7.14% |
Andrea Odetti | 24 | 1.44% | 1 | 7.14% |
Andreas Oberritter | 12 | 0.72% | 1 | 7.14% |
Andrew Morton | 9 | 0.54% | 2 | 14.29% |
Oliver Endriss | 5 | 0.30% | 1 | 7.14% |
Linus Torvalds | 1 | 0.06% | 1 | 7.14% |
Sakari Ailus | 1 | 0.06% | 1 | 7.14% |
Total | 1666 | 100.00% | 14 | 100.00% |