Release 4.12: include/linux/virtio_ring.h
#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H
#include <asm/barrier.h>
#include <linux/irqreturn.h>
#include <uapi/linux/virtio_ring.h>
/*
 * Barriers in virtio are tricky. Non-SMP virtio guests can't assume
 * they're not on an SMP host system, so they need to assume real
 * barriers. Non-SMP virtio hosts could skip the barriers, but does
 * anyone care?
 *
 * For virtio_pci on SMP, we don't need to order with respect to MMIO
 * accesses through relaxed memory I/O windows, so virt_mb() et al are
 * sufficient.
 *
 * For using virtio to talk to real devices (eg. other heterogeneous
 * CPUs) we do need real barriers. In theory, we could be using both
 * kinds of virtio, so it's a runtime decision, and the branch is
 * actually quite cheap.
 */
static inline void virtio_mb(bool weak_barriers)
{
        if (weak_barriers)
                virt_mb();
        else
                mb();
}
Contributors
Person             | Tokens | Token % | Commits | Commit %
Rusty Russell      |     20 |  95.24% |       1 |   50.00%
Michael S. Tsirkin |      1 |   4.76% |       1 |   50.00%
Total              |     21 | 100.00% |       2 |  100.00%
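As a hedged illustration of the consumer-side pattern (the helper below is not part of this header; its name and the use of a raw struct vring are assumptions): virtio_mb() sits between whatever re-enabled device notifications and the re-check of the used index, so the check cannot be reordered before the enable.

#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

/* Hypothetical helper: check whether the device has produced new used
 * entries since "last_used_idx", a driver-side counter. */
static bool example_more_used(struct virtio_device *vdev, struct vring *vring,
                              u16 last_used_idx, bool weak_barriers)
{
        /* Order the preceding enable/update against reading used->idx. */
        virtio_mb(weak_barriers);
        return virtio16_to_cpu(vdev, READ_ONCE(vring->used->idx)) != last_used_idx;
}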
static inline void virtio_rmb(bool weak_barriers)
{
        if (weak_barriers)
                virt_rmb();
        else
                rmb();
}
Contributors
Person             | Tokens | Token % | Commits | Commit %
Rusty Russell      |     20 |  95.24% |       1 |   50.00%
Michael S. Tsirkin |      1 |   4.76% |       1 |   50.00%
Total              |     21 | 100.00% |       2 |  100.00%
static inline void virtio_wmb(bool weak_barriers)
{
        if (weak_barriers)
                virt_wmb();
        else
                wmb();
}
Contributors
Person             | Tokens | Token % | Commits | Commit %
Rusty Russell      |     20 |  95.24% |       1 |   50.00%
Michael S. Tsirkin |      1 |   4.76% |       1 |   50.00%
Total              |     21 | 100.00% |       2 |  100.00%
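A hedged producer-side sketch (assumed helper, not from this header): the avail ring entry, and any descriptor the caller filled in earlier, must be visible to the device before the avail index is bumped, which is what virtio_wmb() provides here. The split-ring layout with a power-of-two size is assumed.

#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

/* Hypothetical helper: expose descriptor chain "head" to the device. */
static void example_publish_desc(struct virtio_device *vdev, struct vring *vring,
                                 u16 head, u16 avail_idx, bool weak_barriers)
{
        vring->avail->ring[avail_idx & (vring->num - 1)] =
                cpu_to_virtio16(vdev, head);

        /* Descriptors and the avail ring entry before the index update. */
        virtio_wmb(weak_barriers);

        vring->avail->idx = cpu_to_virtio16(vdev, avail_idx + 1);
}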
static inline void virtio_store_mb(bool weak_barriers,
                                   __virtio16 *p, __virtio16 v)
{
        if (weak_barriers) {
                virt_store_mb(*p, v);
        } else {
                WRITE_ONCE(*p, v);
                mb();
        }
}
Contributors
Person             | Tokens | Token % | Commits | Commit %
Michael S. Tsirkin |     45 | 100.00% |       1 |  100.00%
Total              |     45 | 100.00% |       1 |  100.00%
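A hedged sketch of the kind of update virtio_store_mb() is meant for (helper name assumed): with VIRTIO_RING_F_EVENT_IDX negotiated, the driver publishes the used index at which it wants an interrupt, and that store must be ordered before any subsequent re-check of used->idx so events are not missed.

#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

/* Hypothetical helper: advertise the used-event index to the device. */
static void example_set_used_event(struct virtio_device *vdev, struct vring *vring,
                                   u16 used_event, bool weak_barriers)
{
        virtio_store_mb(weak_barriers, &vring_used_event(vring),
                        cpu_to_virtio16(vdev, used_event));
}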
struct virtio_device;
struct virtqueue;
/*
 * Creates a virtqueue and allocates the descriptor ring. If
 * may_reduce_num is set, then this may allocate a smaller ring than
 * expected. The caller should query virtqueue_get_vring_size to learn
 * the actual size of the ring.
*/
struct virtqueue *vring_create_virtqueue(unsigned int index,
                                         unsigned int num,
                                         unsigned int vring_align,
                                         struct virtio_device *vdev,
                                         bool weak_barriers,
                                         bool may_reduce_num,
                                         bool ctx,
                                         bool (*notify)(struct virtqueue *vq),
                                         void (*callback)(struct virtqueue *vq),
                                         const char *name);
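A hedged sketch of how a transport might call this (the callbacks, queue size, and alignment are illustrative assumptions, loosely modelled on virtio_pci): with may_reduce_num set, the caller re-reads the ring size via virtqueue_get_vring_size() afterwards.

#include <linux/cache.h>
#include <linux/virtio.h>
#include <linux/virtio_ring.h>

/* Hypothetical transport callbacks. */
static bool example_notify(struct virtqueue *vq)
{
        /* Kick the device, e.g. by writing a doorbell register. */
        return true;
}

static void example_callback(struct virtqueue *vq)
{
        /* Invoked via vring_interrupt() when the device returns buffers. */
}

static struct virtqueue *example_setup_vq(struct virtio_device *vdev)
{
        struct virtqueue *vq;

        /* Ask for 256 entries, but accept a smaller ring if memory is tight. */
        vq = vring_create_virtqueue(0, 256, SMP_CACHE_BYTES, vdev,
                                    true /* weak_barriers */,
                                    true /* may_reduce_num */,
                                    false /* ctx */,
                                    example_notify, example_callback, "example");
        if (vq)
                dev_info(&vdev->dev, "ring has %u entries\n",
                         virtqueue_get_vring_size(vq));
        return vq;
}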
/* Creates a virtqueue with a custom layout. */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
                                        struct vring vring,
                                        struct virtio_device *vdev,
                                        bool weak_barriers,
                                        bool ctx,
                                        bool (*notify)(struct virtqueue *),
                                        void (*callback)(struct virtqueue *),
                                        const char *name);
/*
 * Creates a virtqueue with a standard layout but a caller-allocated
 * ring.
 */
struct virtqueue *vring_new_virtqueue(unsigned int index,
                                      unsigned int num,
                                      unsigned int vring_align,
                                      struct virtio_device *vdev,
                                      bool weak_barriers,
                                      bool ctx,
                                      void *pages,
                                      bool (*notify)(struct virtqueue *vq),
                                      void (*callback)(struct virtqueue *vq),
                                      const char *name);
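A hedged sketch of the caller-allocated variant (helper and sizes assumed; real transports may need DMA-visible memory rather than a plain kzalloc): the caller sizes the buffer with vring_size(), keeps ownership of it, and must free it itself after vring_del_virtqueue().

#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_ring.h>

/* Hypothetical helper: build a queue on memory the transport owns. */
static struct virtqueue *example_setup_static_vq(struct virtio_device *vdev,
                                                 bool (*notify)(struct virtqueue *),
                                                 void (*callback)(struct virtqueue *))
{
        unsigned int num = 128;
        struct virtqueue *vq;
        void *pages = kzalloc(vring_size(num, PAGE_SIZE), GFP_KERNEL);

        if (!pages)
                return NULL;

        vq = vring_new_virtqueue(0, num, PAGE_SIZE, vdev,
                                 true /* weak_barriers */, false /* ctx */,
                                 pages, notify, callback, "example-static");
        if (!vq)
                kfree(pages);
        return vq;
}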
/*
 * Destroys a virtqueue. If created with vring_create_virtqueue, this
 * also frees the ring.
*/
void vring_del_virtqueue(struct virtqueue *vq);
/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);
irqreturn_t vring_interrupt(int irq, void *_vq);
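A hedged sketch of the remaining transport glue (the function names and interrupt wiring below are assumptions): vring_interrupt() already has the irq_handler_t signature, so it can be registered directly for a per-queue interrupt, and vring_transport_features() is typically called while finalizing features so only ring-related transport bits survive negotiation.

#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_ring.h>

/* Hypothetical per-queue interrupt registration. */
static int example_request_vq_irq(unsigned int irq, struct virtqueue *vq)
{
        return request_irq(irq, vring_interrupt, 0, "example-vq", vq);
}

/* Hypothetical finalize_features hook for a vring-based transport. */
static int example_finalize_features(struct virtio_device *vdev)
{
        vring_transport_features(vdev);
        return 0;
}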
#endif /* _LINUX_VIRTIO_RING_H */
Overall Contributors
Person             | Tokens | Token % | Commits | Commit %
Rusty Russell      |    163 |  47.66% |       7 |   46.67%
Andrew Lutomirski  |    111 |  32.46% |       1 |    6.67%
Michael S. Tsirkin |     61 |  17.84% |       4 |   26.67%
Jason (Hui) Wang   |      4 |   1.17% |       1 |    6.67%
David Howells      |      2 |   0.58% |       1 |    6.67%
Heinz Graalfs      |      1 |   0.29% |       1 |    6.67%
Total              |    342 | 100.00% |      15 |  100.00%