cregit-Linux: how code gets into the kernel

Release 4.11, drivers/virtio/virtio_ring.c (directory: drivers/virtio)
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */

#define BAD_RING(_vq, fmt, args...)				\
	do {                                                    \
                dev_err(&(_vq)->vq.vdev->dev,                   \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                BUG();                                          \
        } while (0)
/* Caller is supposed to guarantee no reentry. */

#define START_USE(_vq)						\
	do {                                                    \
                if ((_vq)->in_use)                              \
                        panic("%s:in_use = %i\n",               \
                              (_vq)->vq.name, (_vq)->in_use);   \
                (_vq)->in_use = __LINE__;                       \
        } while (0)

#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else

#define BAD_RING(_vq, fmt, args...)				\
	do {                                                    \
                dev_err(&_vq->vq.vdev->dev,                     \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                (_vq)->broken = true;                           \
        } while (0)

#define START_USE(vq)

#define END_USE(vq)
#endif


struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};


#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */


static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Lutomirski2365.71%266.67%
Michael S. Tsirkin1234.29%133.33%
Total35100.00%3100.00%

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Lutomirski2492.31%133.33%
Michael S. Tsirkin13.85%133.33%
Baoyou Xie13.85%133.33%
Total26100.00%3100.00%

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Lutomirski67100.00%1100.00%
Total67100.00%1100.00%


static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq), cpu_addr, size, direction);
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Lutomirski59100.00%1100.00%
Total59100.00%1100.00%


static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Lutomirski147100.00%1100.00%
Total147100.00%1100.00%


static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Lutomirski41100.00%1100.00%
Total41100.00%1100.00%


static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

Contributors

PersonTokensPropCommitsCommitProp
Mark McLoughlin5761.96%114.29%
Michael S. Tsirkin1516.30%228.57%
Rusty Russell1415.22%342.86%
Will Deacon66.52%114.29%
Total92100.00%7100.00%


static inline int virtqueue_add(struct virtqueue *_vq, struct scatterlist *sgs[], unsigned int total_sg, unsigned int out_sgs, unsigned int in_sgs, void *data, gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); struct scatterlist *sg; struct vring_desc *desc; unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx; int head; bool indirect; START_USE(vq); BUG_ON(data == NULL); if (unlikely(vq->broken)) { END_USE(vq); return -EIO; } #ifdef DEBUG { ktime_t now = ktime_get(); /* No kick or get, with .1 second between? Warn. */ if (vq->last_add_time_valid) WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time)) > 100); vq->last_add_time = now; vq->last_add_time_valid = true; } #endif BUG_ON(total_sg > vq->vring.num); BUG_ON(total_sg == 0); head = vq->free_head; /* If the host supports indirect descriptor tables, and we have multiple * buffers, then go indirect. FIXME: tune this threshold */ if (vq->indirect && total_sg > 1 && vq->vq.num_free) desc = alloc_indirect(_vq, total_sg, gfp); else desc = NULL; if (desc) { /* Use a single buffer which doesn't continue */ indirect = true; /* Set up rest to use this indirect table. */ i = 0; descs_used = 1; } else { indirect = false; desc = vq->vring.desc; i = head; descs_used = total_sg; } if (vq->vq.num_free < descs_used) { pr_debug("Can't add buf len %i - avail = %i\n", descs_used, vq->vq.num_free); /* FIXME: for historical reasons, we force a notify here if * there are outgoing parts to the buffer. Presumably the * host should service the ring ASAP. */ if (out_sgs) vq->notify(&vq->vq); if (indirect) kfree(desc); END_USE(vq); return -ENOSPC; } for (n = 0; n < out_sgs; n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE); if (vring_mapping_error(vq, addr)) goto unmap_release; desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT); desc[i].addr = cpu_to_virtio64(_vq->vdev, addr); desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); prev = i; i = virtio16_to_cpu(_vq->vdev, desc[i].next); } } for (; n < (out_sgs + in_sgs); n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE); if (vring_mapping_error(vq, addr)) goto unmap_release; desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE); desc[i].addr = cpu_to_virtio64(_vq->vdev, addr); desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); prev = i; i = virtio16_to_cpu(_vq->vdev, desc[i].next); } } /* Last one doesn't continue. */ desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT); if (indirect) { /* Now that the indirect table is filled in, map it. */ dma_addr_t addr = vring_map_single( vq, desc, total_sg * sizeof(struct vring_desc), DMA_TO_DEVICE); if (vring_mapping_error(vq, addr)) goto unmap_release; vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT); vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr); vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc)); } /* We're using some buffers from the free list. */ vq->vq.num_free -= descs_used; /* Update free pointer */ if (indirect) vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next); else vq->free_head = i; /* Store token and indirect buffer state. */ vq->desc_state[head].data = data; if (indirect) vq->desc_state[head].indir_desc = desc; /* Put entry in available array (but don't update avail->idx until they * do sync). 
*/ avail = vq->avail_idx_shadow & (vq->vring.num - 1); vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); /* Descriptors and available array need to be set before we expose the * new available array entries. */ virtio_wmb(vq->weak_barriers); vq->avail_idx_shadow++; vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow); vq->num_added++; pr_debug("Added buffer head %i to %p\n", head, vq); END_USE(vq); /* This is very unlikely, but theoretically possible. Kick * just in case. */ if (unlikely(vq->num_added == (1 << 16) - 1)) virtqueue_kick(_vq); return 0; unmap_release: err_idx = i; i = head; for (n = 0; n < total_sg; n++) { if (i == err_idx) break; vring_unmap_one(vq, &desc[i]); i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next); } vq->vq.num_free += total_sg; if (indirect) kfree(desc); END_USE(vq); return -EIO; }

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell54055.33%1356.52%
Andrew Lutomirski27327.97%14.35%
Michael S. Tsirkin10010.25%417.39%
Mark McLoughlin262.66%14.35%
Tetsuo Handa141.43%14.35%
Wei Yongjun90.92%14.35%
Venkatesh Srinivas70.72%14.35%
Gonglei (Arei)70.72%14.35%
Total976100.00%23100.00%

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_num: the number of scatterlists readable by other side
 * @in_num: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;

		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell101100.00%2100.00%
Total101100.00%2100.00%
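
Illustration only (not part of virtio_ring.c): a hedged sketch of how a driver might feed one request through virtqueue_add_sgs() above. struct my_req, its fields, and my_vq_queue_request() are hypothetical; sg_init_one() and GFP_ATOMIC are standard kernel APIs.

#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Hypothetical request layout: a device-readable command, a device-writable status. */
struct my_req {
	u32 type;	/* readable by the device */
	u8 status;	/* written by the device */
};

static int my_vq_queue_request(struct virtqueue *vq, struct my_req *req)
{
	struct scatterlist type_sg, status_sg;
	struct scatterlist *sgs[2];

	sg_init_one(&type_sg, &req->type, sizeof(req->type));
	sg_init_one(&status_sg, &req->status, sizeof(req->status));
	sgs[0] = &type_sg;	/* out (device-readable) entries come first */
	sgs[1] = &status_sg;	/* then in (device-writable) entries */

	/* One out sg list, one in sg list; req is the token that
	 * virtqueue_get_buf() will hand back on completion. */
	return virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
}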

EXPORT_SYMBOL_GPL(virtqueue_add_sgs);

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell45100.00%2100.00%
Total45100.00%2100.00%

EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell45100.00%2100.00%
Total45100.00%2100.00%

EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); /** * virtqueue_kick_prepare - first half of split virtqueue_kick call. * @vq: the struct virtqueue * * Instead of virtqueue_kick(), you can do: * if (virtqueue_kick_prepare(vq)) * virtqueue_notify(vq); * * This is sometimes useful because the virtqueue_kick_prepare() needs * to be serialized, but the actual virtqueue_notify() call does not. */
bool virtqueue_kick_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 new, old; bool needs_kick; START_USE(vq); /* We need to expose available array entries before checking avail * event. */ virtio_mb(vq->weak_barriers); old = vq->avail_idx_shadow - vq->num_added; new = vq->avail_idx_shadow; vq->num_added = 0; #ifdef DEBUG if (vq->last_add_time_valid) { WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), vq->last_add_time)) > 100); } vq->last_add_time_valid = false; #endif if (vq->event) { needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)), new, old); } else { needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY)); } END_USE(vq); return needs_kick; }

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell11871.52%660.00%
Michael S. Tsirkin4326.06%220.00%
Venkatesh Srinivas21.21%110.00%
Jason (Hui) Wang21.21%110.00%
Total165100.00%10100.00%

EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}

Contributors

PersonTokensPropCommitsCommitProp
Heinz Graalfs3052.63%250.00%
Rusty Russell2747.37%250.00%
Total57100.00%4100.00%
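
Illustration only (not part of the file): a hedged sketch of the split kick that the virtqueue_kick_prepare()/virtqueue_notify() comments describe. The serialized half runs under a hypothetical driver lock; the possibly slow notify runs after the lock is dropped.

#include <linux/spinlock.h>
#include <linux/virtio.h>

static void my_submit_and_kick(struct virtqueue *vq, spinlock_t *my_lock)
{
	unsigned long flags;
	bool needs_kick;

	spin_lock_irqsave(my_lock, flags);
	/* ... virtqueue_add_sgs()/virtqueue_add_outbuf() calls would go here ... */
	needs_kick = virtqueue_kick_prepare(vq);	/* must stay serialized */
	spin_unlock_irqrestore(my_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq);	/* no serialization needed; may trap to the host */
}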

EXPORT_SYMBOL_GPL(virtqueue_notify); /** * virtqueue_kick - update after add_buf * @vq: the struct virtqueue * * After one or more virtqueue_add_* calls, invoke this to kick * the other side. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). * * Returns false if kick failed, otherwise true. */
bool virtqueue_kick(struct virtqueue *vq) { if (virtqueue_kick_prepare(vq)) return virtqueue_notify(vq); return true; }

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell2076.92%266.67%
Heinz Graalfs623.08%133.33%
Total26100.00%3100.00%

EXPORT_SYMBOL_GPL(virtqueue_kick);
static void detach_buf(struct vring_virtqueue *vq, unsigned int head) { unsigned int i, j; __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT); /* Clear data ptr. */ vq->desc_state[head].data = NULL; /* Put back on free list: unmap first-level descriptors and find end */ i = head; while (vq->vring.desc[i].flags & nextflag) { vring_unmap_one(vq, &vq->vring.desc[i]); i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next); vq->vq.num_free++; } vring_unmap_one(vq, &vq->vring.desc[i]); vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head); vq->free_head = head; /* Plus final descriptor */ vq->vq.num_free++; /* Free the indirect table, if any, now that it's unmapped. */ if (vq->desc_state[head].indir_desc) { struct vring_desc *indir_desc = vq->desc_state[head].indir_desc; u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len); BUG_ON(!(vq->vring.desc[head].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))); BUG_ON(len == 0 || len % sizeof(struct vring_desc)); for (j = 0; j < len / sizeof(struct vring_desc); j++) vring_unmap_one(vq, &indir_desc[j]); kfree(vq->desc_state[head].indir_desc); vq->desc_state[head].indir_desc = NULL; } }

Contributors

PersonTokensPropCommitsCommitProp
Andrew Lutomirski18157.83%116.67%
Rusty Russell6922.04%233.33%
Michael S. Tsirkin3912.46%116.67%
Mark McLoughlin237.35%116.67%
Gonglei (Arei)10.32%116.67%
Total313100.00%6100.00%


static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell2674.29%150.00%
Michael S. Tsirkin925.71%150.00%
Total35100.00%2100.00%

/** * virtqueue_get_buf - get the next used buffer * @vq: the struct virtqueue we're talking about. * @len: the length written into the buffer * * If the device wrote data into the buffer, @len will be set to the * amount written. This means you don't need to clear the buffer * beforehand to ensure there's no data leakage in the case of short * writes. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). * * Returns NULL if there are no used buffers, or the "data" token * handed to virtqueue_add_*(). */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) { struct vring_virtqueue *vq = to_vvq(_vq); void *ret; unsigned int i; u16 last_used; START_USE(vq); if (unlikely(vq->broken)) { END_USE(vq); return NULL; } if (!more_used(vq)) { pr_debug("No more buffers in queue\n"); END_USE(vq); return NULL; } /* Only get used array entries after they have been exposed by host. */ virtio_rmb(vq->weak_barriers); last_used = (vq->last_used_idx & (vq->vring.num - 1)); i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id); *len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len); if (unlikely(i >= vq->vring.num)) { BAD_RING(vq, "id %u out of range\n", i); return NULL; } if (unlikely(!vq->desc_state[i].data)) { BAD_RING(vq, "id %u is not a head!\n", i); return NULL; } /* detach_buf clears data, so grab it now. */ ret = vq->desc_state[i].data; detach_buf(vq, i); vq->last_used_idx++; /* If we expect an interrupt for the next entry, tell host * by writing event index and flush out the write before * the read in the next get_buf call. */ if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) virtio_store_mb(vq->weak_barriers, &vring_used_event(&vq->vring), cpu_to_virtio16(_vq->vdev, vq->last_used_idx)); #ifdef DEBUG vq->last_add_time_valid = false; #endif END_USE(vq); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell23178.84%642.86%
Michael S. Tsirkin5518.77%642.86%
Andrew Lutomirski62.05%17.14%
Venkatesh Srinivas10.34%17.14%
Total293100.00%14100.00%

EXPORT_SYMBOL_GPL(virtqueue_get_buf); /** * virtqueue_disable_cb - disable callbacks * @vq: the struct virtqueue we're talking about. * * Note that this is not necessarily synchronous, hence unreliable and only * useful as an optimization. * * Unlike other operations, this need not be serialized. */
void virtqueue_disable_cb(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; if (!vq->event) vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); } }

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell2741.54%120.00%
Venkatesh Srinivas2335.38%120.00%
Michael S. Tsirkin812.31%240.00%
Ladi Prosek710.77%120.00%
Total65100.00%5100.00%

EXPORT_SYMBOL_GPL(virtqueue_disable_cb); /** * virtqueue_enable_cb_prepare - restart callbacks after disable_cb * @vq: the struct virtqueue we're talking about. * * This re-enables callbacks; it returns current queue state * in an opaque unsigned value. This value should be later tested by * virtqueue_poll, to detect a possible race between the driver checking for * more work, and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 last_used_idx; START_USE(vq); /* We optimistically turn back on interrupts, then check if there was * more to do. */ /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. */ if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; if (!vq->event) vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); } vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx); END_USE(vq); return last_used_idx; }

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin3938.24%342.86%
Rusty Russell3534.31%228.57%
Venkatesh Srinivas2120.59%114.29%
Ladi Prosek76.86%114.29%
Total102100.00%7100.00%

EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin4384.31%266.67%
Rusty Russell815.69%133.33%
Total51100.00%3100.00%
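
Illustration only: a hedged sketch of busy-waiting with the virtqueue_enable_cb_prepare()/virtqueue_poll() pair described above. The function name is hypothetical; cpu_relax() is assumed to be the usual kernel busy-wait hint available in driver code.

#include <linux/virtio.h>

static void my_busy_wait_for_used(struct virtqueue *vq)
{
	unsigned opaque = virtqueue_enable_cb_prepare(vq);

	/* Spin until the device marks at least one buffer as used. */
	while (!virtqueue_poll(vq, opaque))
		cpu_relax();
}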

EXPORT_SYMBOL_GPL(virtqueue_poll);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin2488.89%150.00%
Rusty Russell311.11%150.00%
Total27100.00%2100.00%
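
Illustration only: a hedged sketch of the race-free re-check loop that the virtqueue_enable_cb() comment describes. Callbacks are disabled while used buffers are drained with virtqueue_get_buf(), and the loop repeats whenever re-enabling reports more pending work. my_complete() is a hypothetical completion routine.

#include <linux/virtio.h>

static void my_complete(void *token, unsigned int len);	/* hypothetical */

static void my_service_queue(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	do {
		virtqueue_disable_cb(vq);	/* best effort; purely an optimization */
		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
			my_complete(token, len);	/* len = bytes the device wrote */
		/* virtqueue_enable_cb() returns false if buffers arrived meanwhile. */
	} while (!virtqueue_enable_cb(vq));
}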

EXPORT_SYMBOL_GPL(virtqueue_enable_cb); /** * virtqueue_enable_cb_delayed - restart callbacks after disable_cb. * @vq: the struct virtqueue we're talking about. * * This re-enables callbacks but hints to the other side to delay * interrupts until most of the available buffers have been processed; * it returns "false" if there are many pending buffers in the queue, * to detect a possible race between the driver checking for more work, * and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 bufs; START_USE(vq); /* We optimistically turn back on interrupts, then check if there was * more to do. */ /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always update the event index to keep code simple. */ if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; if (!vq->event) vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); } /* TODO: tune this threshold */ bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4; virtio_store_mb(vq->weak_barriers, &vring_used_event(&vq->vring), cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs)); if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) { END_USE(vq); return false; } END_USE(vq); return true; }

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin14081.87%350.00%
Venkatesh Srinivas2212.87%116.67%
Ladi Prosek84.68%116.67%
Rusty Russell10.58%116.67%
Total171100.00%6100.00%

EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); /** * virtqueue_detach_unused_buf - detach first unused buffer * @vq: the struct virtqueue we're talking about. * * Returns NULL or the "data" token handed to virtqueue_add_*(). * This is not valid on an active queue; it is useful only for device * shutdown. */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); unsigned int i; void *buf; START_USE(vq); for (i = 0; i < vq->vring.num; i++) { if (!vq->desc_state[i].data) continue; /* detach_buf clears data, so grab it now. */ buf = vq->desc_state[i].data; detach_buf(vq, i); vq->avail_idx_shadow--; vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow); END_USE(vq); return buf; } /* That should have freed everything. */ BUG_ON(vq->vq.num_free != vq->vring.num); END_USE(vq); return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
Shirley Ma10876.60%114.29%
Michael S. Tsirkin117.80%228.57%
Amit Shah85.67%114.29%
Andrew Lutomirski64.26%114.29%
Venkatesh Srinivas64.26%114.29%
Rusty Russell21.42%114.29%
Total141100.00%7100.00%
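
Illustration only: a hedged sketch of device teardown using virtqueue_detach_unused_buf(). It assumes the device has already been reset, so the queue is no longer active; my_free_request() is a hypothetical driver routine.

#include <linux/virtio.h>

static void my_free_request(void *token);	/* hypothetical */

static void my_remove_cleanup(struct virtqueue *vq)
{
	void *token;

	/* Reclaim tokens that were added but never consumed by the device. */
	while ((token = virtqueue_detach_unused_buf(vq)) != NULL)
		my_free_request(token);
}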

EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell90100.00%2100.00%
Total90100.00%2100.00%

EXPORT_SYMBOL_GPL(vring_interrupt);
struct virtqueue *__vring_new_virtqueue(unsigned int index, struct vring vring, struct virtio_device *vdev, bool weak_barriers, bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name) { unsigned int i; struct vring_virtqueue *vq; vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state), GFP_KERNEL); if (!vq) return NULL; vq->vring = vring; vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.name = name; vq->vq.num_free = vring.num; vq->vq.index = index; vq->we_own_ring = false; vq->queue_dma_addr = 0; vq->queue_size_in_bytes = 0; vq->notify = notify; vq->weak_barriers = weak_barriers; vq->broken = false; vq->last_used_idx = 0; vq->avail_flags_shadow = 0; vq->avail_idx_shadow = 0; vq->num_added = 0; list_add_tail(&vq->vq.list, &vdev->vqs); #ifdef DEBUG vq->in_use = false; vq->last_add_time_valid = false; #endif vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); /* No callback? Tell other side not to bother us. */ if (!callback) { vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; if (!vq->event) vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow); } /* Put everything in free lists. */ vq->free_head = 0; for (i = 0; i < vring.num-1; i++) vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1); memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state)); return &vq->vq; }

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell23665.74%741.18%
Andrew Lutomirski5415.04%211.76%
Venkatesh Srinivas246.69%15.88%
Michael S. Tsirkin215.85%211.76%
Mark McLoughlin113.06%15.88%
Ladi Prosek71.95%15.88%
Amit Shah30.84%15.88%
Jason (Hui) Wang20.56%15.88%
Heinz Graalfs10.28%15.88%
Total359100.00%17100.00%

EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Lutomirski9885.22%125.00%
Rusty Russell1714.78%375.00%
Total115100.00%4100.00%


static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Lutomirski4170.69%133.33%
Rusty Russell1627.59%133.33%
Michael S. Tsirkin11.72%133.33%
Total58100.00%3100.00%


struct virtqueue *vring_create_virtqueue( unsigned int index, unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, bool may_reduce_num, bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name) { struct virtqueue *vq; void *queue = NULL; dma_addr_t dma_addr; size_t queue_size_in_bytes; struct vring vring; /* We assume num is a power of 2. */ if (num & (num - 1)) { dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num); return NULL; } /* TODO: allocate each queue chunk individually */ for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) { queue = vring_alloc_queue(vdev, vring_size(num, vring_align), &dma_addr, GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO); if (queue) break; } if (!num) return NULL; if (!queue) { /* Try to get a single page. You are my only hope! */ queue = vring_alloc_queue(vdev, vring_size(num, vring_align), &dma_addr, GFP_KERNEL|__GFP_ZERO); } if (!queue) return NULL; queue_size_in_bytes = vring_size(num, vring_align); vring_init(&vring, num, queue, vring_align); vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, notify, callback, name); if (!vq) { vring_free_queue(vdev, queue_size_in_bytes, queue, dma_addr); return NULL; } to_vvq(vq)->queue_dma_addr = dma_addr; to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes; to_vvq(vq)->we_own_ring = true; return vq; }

Contributors

PersonTokensPropCommitsCommitProp
Andrew Lutomirski28999.31%150.00%
Dan Carpenter20.69%150.00%
Total291100.00%2100.00%
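
Illustration only: a hedged sketch of how a transport might use vring_create_virtqueue(). The queue size, alignment, callbacks, and name are made-up values for illustration, not requirements of the API.

#include <linux/virtio.h>
#include <linux/virtio_ring.h>

static struct virtqueue *my_setup_vq(struct virtio_device *vdev,
				     unsigned int index,
				     bool (*notify)(struct virtqueue *),
				     void (*callback)(struct virtqueue *))
{
	/* Ask for 256 entries on a 4096-byte-aligned ring, and allow the
	 * core to shrink the queue if contiguous memory is tight. */
	return vring_create_virtqueue(index, 256, 4096, vdev,
				      true,	/* weak_barriers */
				      true,	/* may_reduce_num */
				      notify, callback, "my-vq");
}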

EXPORT_SYMBOL_GPL(vring_create_virtqueue);
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;
	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers,
				     notify, callback, name);
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Lutomirski94100.00%1100.00%
Total94100.00%1100.00%

EXPORT_SYMBOL_GPL(vring_new_virtqueue);
void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->vring.desc, vq->queue_dma_addr);
	}
	list_del(&_vq->list);
	kfree(vq);
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Lutomirski64100.00%1100.00%
Total64100.00%1100.00%

EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Lutomirski5693.33%150.00%
Michael S. Tsirkin46.67%150.00%
Total60100.00%2100.00%
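
Illustration only: a hedged sketch of where a transport might call vring_transport_features(), modeled loosely on how the PCI and MMIO transports filter feature bits before finalizing them. The hook name and its int return type are assumptions, not taken from this file.

#include <linux/virtio.h>
#include <linux/virtio_ring.h>

static int my_finalize_features(struct virtio_device *vdev)
{
	/* Drop any transport feature bits this vring implementation
	 * does not understand before the device negotiation completes. */
	vring_transport_features(vdev);
	return 0;
}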

EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}

Contributors

PersonTokensPropCommitsCommitProp
Rick Jones28100.00%1100.00%
Total28100.00%1100.00%

EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}

Contributors

PersonTokensPropCommitsCommitProp
Heinz Graalfs25100.00%1100.00%
Total25100.00%1100.00%

EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell41100.00%1100.00%
Total41100.00%1100.00%

EXPORT_SYMBOL_GPL(virtio_break_device);
dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Lutomirski3296.97%150.00%
Cornelia Huck13.03%150.00%
Total33100.00%2100.00%

EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.avail - (char *)vq->vring.desc);
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Lutomirski3054.55%150.00%
Cornelia Huck2545.45%150.00%
Total55100.00%2100.00%

EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.used - (char *)vq->vring.desc);
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Lutomirski3258.18%150.00%
Cornelia Huck2341.82%150.00%
Total55100.00%2100.00%

EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->vring;
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Lutomirski2090.91%150.00%
Cornelia Huck29.09%150.00%
Total22100.00%2100.00%

EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");

Overall Contributors

Person                Tokens     Prop  Commits  CommitProp
Rusty Russell           1923   39.74%       27      38.03%
Andrew Lutomirski       1704   35.21%        4       5.63%
Michael S. Tsirkin       608   12.56%       14      19.72%
Mark McLoughlin          121    2.50%        1       1.41%
Venkatesh Srinivas       114    2.36%        1       1.41%
Shirley Ma               108    2.23%        1       1.41%
Heinz Graalfs             70    1.45%        4       5.63%
Cornelia Huck             59    1.22%        1       1.41%
Rick Jones                33    0.68%        1       1.41%
Ladi Prosek               29    0.60%        1       1.41%
Tetsuo Handa              14    0.29%        1       1.41%
Amit Shah                 11    0.23%        2       2.82%
Wei Yongjun                9    0.19%        1       1.41%
Gonglei (Arei)             8    0.17%        1       1.41%
Will Deacon                6    0.12%        1       1.41%
Roel Kluin                 4    0.08%        1       1.41%
Jason (Hui) Wang           4    0.08%        2       2.82%
Tejun Heo                  3    0.06%        1       1.41%
Joel Stanley               3    0.06%        1       1.41%
Paul Gortmaker             3    0.06%        1       1.41%
Dan Carpenter              2    0.04%        1       1.41%
Anthony Liguori            1    0.02%        1       1.41%
Baoyou Xie                 1    0.02%        1       1.41%
Felipe Franciosi           1    0.02%        1       1.41%
Total                   4839  100.00%       71     100.00%
Created with cregit.