cregit-Linux how code gets into the kernel

Release 4.14 drivers/vhost/vhost.c

Directory: drivers/vhost
/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/interval_tree_generic.h>

#include "vhost.h"


/* Upper bound on the number of regions accepted in a memory map
 * (VHOST_SET_MEM_TABLE); exposed read-only through module parameters. */
static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
	"Maximum number of memory regions in memory map. (default: 64)");

/* Cap on the number of device IOTLB entries kept; read-only parameter. */
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
	"Maximum number of iotlb entries. (default: 2048)");

/* Flag bits attached to memory regions. */
enum {
	VHOST_MEMORY_F_LOG = 0x1,
};


#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])

#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])

/* Generate static inline vhost_umem_interval_tree_{insert,remove,iter_*}()
 * helpers for looking up umem nodes by guest address range.
 * START/LAST accessors are defined elsewhere (presumably vhost.h) — they
 * must yield the node's [start, last] interval in __u64. */
INTERVAL_TREE_DEFINE(struct vhost_umem_node,
		     rb, __u64, __subtree_last,
		     START, LAST, static inline, vhost_umem_interval_tree);

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY

/* Reset user_be so legacy rings follow host-native endianness again. */
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
	vq->user_be = !virtio_legacy_is_little_endian();
}

Contributors

PersonTokensPropCommitsCommitProp
Greg Kurz19100.00%2100.00%
Total19100.00%2100.00%


/* Userspace asked for a big-endian legacy ring. */
static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
	vq->user_be = true;
}

Contributors

PersonTokensPropCommitsCommitProp
Greg Kurz17100.00%1100.00%
Total17100.00%1100.00%


/* Userspace asked for a little-endian legacy ring. */
static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
	vq->user_be = false;
}

Contributors

PersonTokensPropCommitsCommitProp
Greg Kurz17100.00%1100.00%
Total17100.00%1100.00%


/* VHOST_SET_VRING_ENDIAN: record the legacy ring endianness requested by
 * userspace. Refused with -EBUSY once the vq backend is set. */
static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	struct vhost_vring_state state;

	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&state, argp, sizeof(state)))
		return -EFAULT;

	if (state.num != VHOST_VRING_LITTLE_ENDIAN &&
	    state.num != VHOST_VRING_BIG_ENDIAN)
		return -EINVAL;

	if (state.num == VHOST_VRING_BIG_ENDIAN)
		vhost_enable_cross_endian_big(vq);
	else
		vhost_enable_cross_endian_little(vq);

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Greg Kurz89100.00%2100.00%
Total89100.00%2100.00%


/* VHOST_GET_VRING_ENDIAN: report the current user_be setting to userspace. */
static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	struct vhost_vring_state state = {
		.index = idx,
		.num = vq->user_be
	};

	if (copy_to_user(argp, &state, sizeof(state)))
		return -EFAULT;

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Greg Kurz59100.00%1100.00%
Total59100.00%1100.00%


static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	/* VIRTIO 1.0 rings are always little endian. For legacy virtio,
	 * user_be was initialized at reset time according to the host
	 * endianness; unless userspace set an explicit endianness, the
	 * default is native endian, as legacy virtio expects. */
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}

Contributors

PersonTokensPropCommitsCommitProp
Greg Kurz28100.00%1100.00%
Total28100.00%1100.00%

#else
/* Cross-endian legacy support compiled out: nothing to reset. */
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

Contributors

PersonTokensPropCommitsCommitProp
Greg Kurz10100.00%2100.00%
Total10100.00%2100.00%


/* Cross-endian legacy support compiled out: ioctl not implemented. */
static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	return -ENOIOCTLCMD;
}

Contributors

PersonTokensPropCommitsCommitProp
Greg Kurz20100.00%1100.00%
Total20100.00%1100.00%


/* Cross-endian legacy support compiled out: ioctl not implemented. */
static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}

Contributors

PersonTokensPropCommitsCommitProp
Greg Kurz23100.00%1100.00%
Total23100.00%1100.00%


/* Without legacy cross-endian support, the ring is little endian for
 * VIRTIO 1.0 devices and host-native for legacy ones. */
static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) ||
		    virtio_legacy_is_little_endian();
}

Contributors

PersonTokensPropCommitsCommitProp
Greg Kurz1872.00%150.00%
Halil Pasic728.00%150.00%
Total25100.00%2100.00%

#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
/* On vq reset, is_le falls back to its initial value. */
static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
	vhost_init_is_le(vq);
}

Contributors

PersonTokensPropCommitsCommitProp
Greg Kurz1381.25%150.00%
Halil Pasic318.75%150.00%
Total16100.00%2100.00%

/* Carrier for a synchronous flush: the work item completes wait_event
 * when the worker has run it. */
struct vhost_flush_struct {
	struct vhost_work work;
	struct completion wait_event;
};
/* Work callback used by vhost_work_flush(): wake the waiter. */
static void vhost_flush_work(struct vhost_work *work)
{
	struct vhost_flush_struct *flush;

	flush = container_of(work, struct vhost_flush_struct, work);
	complete(&flush->wait_event);
}

Contributors

PersonTokensPropCommitsCommitProp
Jason (Hui) Wang36100.00%1100.00%
Total36100.00%1100.00%


/* poll_table callback: remember the wait queue head and hook our wait
 * entry onto it. */
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll = container_of(pt, struct vhost_poll, table);

	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin52100.00%1100.00%
Total52100.00%1100.00%


/* Wait-queue wakeup callback: queue the poll work if the event matches
 * the mask we registered for. */
static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

	if (!((unsigned long)key & poll->mask))
		return 0;

	vhost_poll_queue(poll);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin5793.44%133.33%
Tejun Heo34.92%133.33%
Ingo Molnar11.64%133.33%
Total61100.00%3100.00%


/* Initialize a work item with its callback; starts off not queued. */
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	clear_bit(VHOST_WORK_QUEUED, &work->flags);
	work->fn = fn;
	init_waitqueue_head(&work->done);
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin3389.19%150.00%
Jason (Hui) Wang410.81%150.00%
Total37100.00%2100.00%

EXPORT_SYMBOL_GPL(vhost_work_init); /* Init poll structure */
/* Init poll structure: set up wait entry, poll table and the work item
 * that will run @fn when the watched file signals events in @mask. */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;

	vhost_work_init(&poll->work, fn);
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin4462.86%250.00%
Tejun Heo2028.57%125.00%
Jason (Hui) Wang68.57%125.00%
Total70100.00%4100.00%

EXPORT_SYMBOL_GPL(vhost_poll_init); /* Start polling a file. We add ourselves to file's wait queue. The caller must * keep a reference to a file until after vhost_poll_stop is called. */
/* Start polling a file. The caller must keep a reference to the file
 * until after vhost_poll_stop(). Already-pending events are delivered
 * immediately; POLLERR aborts the registration with -EINVAL. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	unsigned long mask;
	int ret = 0;

	if (poll->wqh)
		return 0;

	mask = file->f_op->poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
	if (mask & POLLERR) {
		if (poll->wqh)
			remove_wait_queue(poll->wqh, &poll->wait);
		ret = -EINVAL;
	}

	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin5653.33%133.33%
Jason (Hui) Wang4946.67%266.67%
Total105100.00%3100.00%

EXPORT_SYMBOL_GPL(vhost_poll_start); /* Stop polling a file. After this function returns, it becomes safe to drop the * file reference. You must also flush afterwards. */
/* Stop polling a file. Safe to drop the file reference afterwards, but
 * callers must still flush outstanding work. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (!poll->wqh)
		return;

	remove_wait_queue(poll->wqh, &poll->wait);
	poll->wqh = NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin2261.11%150.00%
Jason (Hui) Wang1438.89%150.00%
Total36100.00%2100.00%

EXPORT_SYMBOL_GPL(vhost_poll_stop);
/* Block until the worker thread has drained up to (and including) a
 * freshly queued flush marker. No-op when there is no worker. */
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	struct vhost_flush_struct flush;

	if (!dev->worker)
		return;

	init_completion(&flush.wait_event);
	vhost_work_init(&flush.work, vhost_flush_work);
	vhost_work_queue(dev, &flush.work);
	wait_for_completion(&flush.wait_event);
}

Contributors

PersonTokensPropCommitsCommitProp
Jason (Hui) Wang2844.44%120.00%
Michael S. Tsirkin1828.57%360.00%
Tejun Heo1726.98%120.00%
Total63100.00%5100.00%

EXPORT_SYMBOL_GPL(vhost_work_flush); /* Flush any work that has been scheduled. When calling this, don't hold any * locks that are also used by the callback. */
/* Flush any scheduled poll work. Don't hold locks the callback also
 * takes when calling this. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	vhost_work_flush(poll->dev, &poll->work);
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin1986.36%266.67%
Tejun Heo313.64%133.33%
Total22100.00%3100.00%

EXPORT_SYMBOL_GPL(vhost_poll_flush);
/* Queue work for the worker thread unless it is already queued. */
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
	if (!dev->worker)
		return;

	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* We can only add the work to the list after we're
		 * sure it was not in the list.
		 * test_and_set_bit() implies a memory barrier. */
		llist_add(&work->node, &dev->work_list);
		wake_up_process(dev->worker);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Tejun Heo3457.63%116.67%
Jason (Hui) Wang1220.34%116.67%
Michael S. Tsirkin1118.64%233.33%
Qin Chuanyu11.69%116.67%
Peng Tao11.69%116.67%
Total59100.00%6100.00%

EXPORT_SYMBOL_GPL(vhost_work_queue); /* A lockless hint for busy polling code to exit the loop */
/* A lockless hint for busy polling code to exit the loop. */
bool vhost_has_work(struct vhost_dev *dev)
{
	return !llist_empty(&dev->work_list);
}

Contributors

PersonTokensPropCommitsCommitProp
Jason (Hui) Wang20100.00%2100.00%
Total20100.00%2100.00%

EXPORT_SYMBOL_GPL(vhost_has_work);
/* Queue this poll's work item on its device's worker. */
void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_work_queue(poll->dev, &poll->work);
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin22100.00%1100.00%
Total22100.00%1100.00%

EXPORT_SYMBOL_GPL(vhost_poll_queue);
/* Drop all cached IOTLB translations for this vq's metadata addresses. */
static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{
	int i;

	for (i = 0; i < VHOST_NUM_ADDRS; i++)
		vq->meta_iotlb[i] = NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Jason (Hui) Wang36100.00%1100.00%
Total36100.00%1100.00%


/* Invalidate cached metadata translations on every vq of the device. */
static void vhost_vq_meta_reset(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i)
		__vhost_vq_meta_reset(d->vqs[i]);
}

Contributors

PersonTokensPropCommitsCommitProp
Jason (Hui) Wang39100.00%1100.00%
Total39100.00%1100.00%


static void vhost_vq_reset(struct vhost_dev *dev, struct vhost_virtqueue *vq) { vq->num = 1; vq->desc = NULL; vq->avail = NULL; vq->used = NULL; vq->last_avail_idx = 0; vq->avail_idx = 0; vq->last_used_idx = 0; vq->signalled_used = 0; vq->signalled_used_valid = false; vq->used_flags = 0; vq->log_used = false; vq->log_addr = -1ull; vq->private_data = NULL; vq->acked_features = 0; vq->log_base = NULL; vq->error_ctx = NULL; vq->error = NULL; vq->kick = NULL; vq->call_ctx = NULL; vq->call = NULL; vq->log_ctx = NULL; vhost_reset_is_le(vq); vhost_disable_cross_endian(vq); vq->busyloop_timeout = 0; vq->umem = NULL; vq->iotlb = NULL; __vhost_vq_meta_reset(vq); }

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin14381.25%440.00%
Jason (Hui) Wang2313.07%440.00%
Greg Kurz105.68%220.00%
Total176100.00%10100.00%


static int vhost_worker(void *data) { struct vhost_dev *dev = data; struct vhost_work *work, *work_next; struct llist_node *node; mm_segment_t oldfs = get_fs(); set_fs(USER_DS); use_mm(dev->mm); for (;;) { /* mb paired w/ kthread_stop */ set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop()) { __set_current_state(TASK_RUNNING); break; } node = llist_del_all(&dev->work_list); if (!node) schedule(); node = llist_reverse_order(node); /* make sure flag is seen after deletion */ smp_wmb(); llist_for_each_entry_safe(work, work_next, node, node) { clear_bit(VHOST_WORK_QUEUED, &work->flags); __set_current_state(TASK_RUNNING); work->fn(work); if (need_resched()) schedule(); } } unuse_mm(dev->mm); set_fs(oldfs); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Tejun Heo7949.38%120.00%
Jason (Hui) Wang3924.38%120.00%
Michael S. Tsirkin1811.25%120.00%
Jens Freimann1610.00%120.00%
Nadav Har'El85.00%120.00%
Total160100.00%5100.00%


/* Free the per-vq iovec buffers and clear the pointers so a later free
 * or re-alloc is safe. */
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin50100.00%1100.00%
Total50100.00%1100.00%

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev) { struct vhost_virtqueue *vq; int i; for (i = 0; i < dev->nvqs; ++i) { vq = dev->vqs[i]; vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV, GFP_KERNEL); vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL); vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL); if (!vq->indirect || !vq->log || !vq->heads) goto err_nomem; } return 0; err_nomem: for (; i >= 0; --i) vhost_vq_free_iovecs(dev->vqs[i]); return -ENOMEM; }

Contributors

PersonTokensPropCommitsCommitProp
Jason (Hui) Wang11881.38%125.00%
Asias He2617.93%250.00%
Michael S. Tsirkin10.69%125.00%
Total145100.00%4100.00%


/* Release the iovec buffers of every vq on the device. */
static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(dev->vqs[i]);
}

Contributors

PersonTokensPropCommitsCommitProp
Jason (Hui) Wang3897.44%150.00%
Michael S. Tsirkin12.56%150.00%
Total39100.00%2100.00%


void vhost_dev_init(struct vhost_dev *dev, struct vhost_virtqueue **vqs, int nvqs) { struct vhost_virtqueue *vq; int i; dev->vqs = vqs; dev->nvqs = nvqs; mutex_init(&dev->mutex); dev->log_ctx = NULL; dev->log_file = NULL; dev->umem = NULL; dev->iotlb = NULL; dev->mm = NULL; dev->worker = NULL; init_llist_head(&dev->work_list); init_waitqueue_head(&dev->wait); INIT_LIST_HEAD(&dev->read_list); INIT_LIST_HEAD(&dev->pending_list); spin_lock_init(&dev->iotlb_lock); for (i = 0; i < dev->nvqs; ++i) { vq = dev->vqs[i]; vq->log = NULL; vq->indirect = NULL; vq->heads = NULL; vq->dev = dev; mutex_init(&vq->mutex); vhost_vq_reset(dev, vq); if (vq->handle_kick) vhost_poll_init(&vq->poll, vq->handle_kick, POLLIN, dev); } }

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin11655.24%111.11%
Jason (Hui) Wang5526.19%444.44%
Asias He2612.38%222.22%
Tejun Heo125.71%111.11%
Zhi Yong Wu10.48%111.11%
Total210100.00%9100.00%

EXPORT_SYMBOL_GPL(vhost_dev_init); /* Caller should have device mutex */
/* Caller should have device mutex.
 * Are you the owner? If not, I don't think you mean to do that. */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	return dev->mm == current->mm ? 0 : -EPERM;
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin25100.00%1100.00%
Total25100.00%1100.00%

EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

/* Bundles the owner task with the work item that attaches the worker to
 * the owner's cgroups, plus the result of the attach. */
struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};
/* Runs on the worker thread: attach it (current) to the owner's cgroups. */
static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;

	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin41100.00%1100.00%
Total41100.00%1100.00%


/* Synchronously move the worker thread into the caller's cgroups by
 * queueing the attach work and flushing it. */
static int vhost_attach_cgroups(struct vhost_dev *dev)
{
	struct vhost_attach_cgroups_struct attach;

	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_work_queue(dev, &attach.work);
	vhost_work_flush(dev, &attach.work);

	return attach.ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin56100.00%1100.00%
Total56100.00%1100.00%

/* Caller should have device mutex */
/* Caller should have device mutex. An owner exists iff mm is set. */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm;
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin15100.00%1100.00%
Total15100.00%1100.00%

EXPORT_SYMBOL_GPL(vhost_dev_has_owner); /* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev) { struct task_struct *worker; int err; /* Is there an owner already? */ if (vhost_dev_has_owner(dev)) { err = -EBUSY; goto err_mm; } /* No owner, become one */ dev->mm = get_task_mm(current); worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid); if (IS_ERR(worker)) { err = PTR_ERR(worker); goto err_worker; } dev->worker = worker; wake_up_process(worker); /* avoid contributing to loadavg */ err = vhost_attach_cgroups(dev); if (err) goto err_cgroup; err = vhost_dev_alloc_iovecs(dev); if (err) goto err_cgroup; return 0; err_cgroup: kthread_stop(worker); dev->worker = NULL; err_worker: if (dev->mm) mmput(dev->mm); dev->mm = NULL; err_mm: return err; }

Contributors

PersonTokensPropCommitsCommitProp
Tejun Heo8250.62%114.29%
Michael S. Tsirkin6640.74%571.43%
Jason (Hui) Wang148.64%114.29%
Total162100.00%7100.00%

EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
/* Pre-allocate the empty umem used by vhost_dev_reset_owner(); may sleep,
 * so it is done before the reset proper. */
struct vhost_umem *vhost_dev_reset_owner_prepare(void)
{
	return kvzalloc(sizeof(struct vhost_umem), GFP_KERNEL);
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin1571.43%360.00%
Michal Hocko314.29%120.00%
Jason (Hui) Wang314.29%120.00%
Total21100.00%5100.00%

EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare); /* Caller should have device mutex */
/* Caller should have device mutex. Tear the device down and install the
 * pre-allocated empty memory map on it and all its vqs. */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
{
	int i;

	vhost_dev_cleanup(dev, true);

	/* Restore memory to default empty mapping. */
	INIT_LIST_HEAD(&umem->umem_list);
	dev->umem = umem;
	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
	 * VQs aren't running. */
	for (i = 0; i < dev->nvqs; ++i)
		dev->vqs[i]->umem = umem;
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin5582.09%480.00%
Jason (Hui) Wang1217.91%120.00%
Total67100.00%5100.00%

EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
/* Stop and flush kick polling on every vq that has it set up. */
void vhost_dev_stop(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
			vhost_poll_stop(&dev->vqs[i]->poll);
			vhost_poll_flush(&dev->vqs[i]->poll);
		}
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin7494.87%375.00%
Asias He45.13%125.00%
Total78100.00%4100.00%

EXPORT_SYMBOL_GPL(vhost_dev_stop);
/* Unlink a umem node from the interval tree and list, free it, and
 * decrement the region count. */
static void vhost_umem_free(struct vhost_umem *umem,
			    struct vhost_umem_node *node)
{
	vhost_umem_interval_tree_remove(node, &umem->umem_tree);
	list_del(&node->link);
	kfree(node);
	umem->numem--;
}

Contributors

PersonTokensPropCommitsCommitProp
Jason (Hui) Wang44100.00%1100.00%
Total44100.00%1100.00%


/* Free every node of a umem and then the umem itself; NULL is a no-op. */
static void vhost_umem_clean(struct vhost_umem *umem)
{
	struct vhost_umem_node *node, *tmp;

	if (!umem)
		return;

	list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
		vhost_umem_free(umem, node);

	kvfree(umem);
}

Contributors

PersonTokensPropCommitsCommitProp
Jason (Hui) Wang47100.00%2100.00%
Total47100.00%2100.00%


/* Drop all queued IOTLB messages (both unread and pending) under the
 * iotlb lock. */
static void vhost_clear_msg(struct vhost_dev *dev)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&dev->iotlb_lock);

	list_for_each_entry_safe(node, n, &dev->read_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	spin_unlock(&dev->iotlb_lock);
}

Contributors

PersonTokensPropCommitsCommitProp
Jason (Hui) Wang85100.00%2100.00%
Total85100.00%2100.00%

/* Caller should have device mutex if and only if locked is set */
/* Caller should have device mutex if and only if locked is set.
 * Release every resource the device holds: per-vq fds/eventfds, iovecs,
 * log fds, memory maps, queued messages, the worker thread and the mm. */
void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		struct vhost_virtqueue *vq = dev->vqs[i];

		if (vq->error_ctx)
			eventfd_ctx_put(vq->error_ctx);
		if (vq->error)
			fput(vq->error);
		if (vq->kick)
			fput(vq->kick);
		if (vq->call_ctx)
			eventfd_ctx_put(vq->call_ctx);
		if (vq->call)
			fput(vq->call);
		vhost_vq_reset(dev, vq);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	if (dev->log_file)
		fput(dev->log_file);
	dev->log_file = NULL;
	/* No one will access memory at this point */
	vhost_umem_clean(dev->umem);
	dev->umem = NULL;
	vhost_umem_clean(dev->iotlb);
	dev->iotlb = NULL;
	vhost_clear_msg(dev);
	wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
	WARN_ON(!llist_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin21672.24%436.36%
Jason (Hui) Wang3913.04%436.36%
Tejun Heo186.02%19.09%
Eric Dumazet144.68%19.09%
Asias He124.01%19.09%
Total299100.00%11100.00%

EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
static int log_access_ok(void __user *