
Release 4.7 virt/kvm/kvm_main.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/ioctl.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "vfio.h"


#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);

/* Default doubles per-vcpu halt_poll_ns. */
static unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, S_IRUGO | S_IWUSR);

/* Default resets per-vcpu halt_poll_ns. */
static unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, S_IRUGO | S_IWUSR);
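
Taken together, these knobs drive adaptive halt polling: halt_poll_ns sets the default poll window, halt_poll_ns_grow multiplies a vCPU's current window after a successful poll (doubling it by default), and halt_poll_ns_shrink divides it, with 0 meaning "reset polling to zero". A minimal sketch of that policy follows; the helper names are hypothetical and this is only an approximation of the idea, not the exact grow/shrink code that appears later in this file.

/* Illustrative sketch only -- hypothetical helpers, not part of kvm_main.c. */
static unsigned int example_grow_poll_ns(unsigned int val)
{
        /* Multiply the current window by the grow factor (default: double). */
        val *= halt_poll_ns_grow;
        /* A real implementation would also clamp this to a sane upper bound. */
        return val;
}

static unsigned int example_shrink_poll_ns(unsigned int val)
{
        /* As the comment above says, a shrink divisor of 0 resets polling. */
        if (halt_poll_ns_shrink == 0)
                return 0;
        return val / halt_poll_ns_shrink;
}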

/*
 * Ordering of locks:
 *
 *      kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */
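
To illustrate the ordering rule documented above, a hypothetical helper (not part of this file) that needed all three locks would have to nest them in exactly this order and release them in reverse:

/* Hypothetical illustration only: kvm->lock -> kvm->slots_lock -> kvm->irq_lock. */
static void example_nested_locking(struct kvm *kvm)
{
        mutex_lock(&kvm->lock);          /* outermost */
        mutex_lock(&kvm->slots_lock);
        mutex_lock(&kvm->irq_lock);      /* innermost */

        /* ... touch state guarded by all three locks ... */

        mutex_unlock(&kvm->irq_lock);
        mutex_unlock(&kvm->slots_lock);
        mutex_unlock(&kvm->lock);
}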


DEFINE_SPINLOCK(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);

LIST_HEAD(vm_list);


static cpumask_var_t cpus_hardware_enabled;

static int kvm_usage_count;

static atomic_t hardware_enable_failed;


struct kmem_cache *kvm_vcpu_cache;

EXPORT_SYMBOL_GPL(kvm_vcpu_cache);


static __read_mostly struct preempt_ops kvm_preempt_ops;


struct dentry *kvm_debugfs_dir;

EXPORT_SYMBOL_GPL(kvm_debugfs_dir);


static int kvm_debugfs_num_entries;

static const struct file_operations *stat_fops_per_vm[];

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static void kvm_release_pfn_dirty(kvm_pfn_t pfn);
static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);


__visible bool kvm_rebooting;

EXPORT_SYMBOL_GPL(kvm_rebooting);


static bool largepages_enabled = true;


bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
        if (pfn_valid(pfn))
                return PageReserved(pfn_to_page(pfn));

        return true;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
avi kivity | 9 | 33.33% | 1 | 12.50%
xiantao zhang | 8 | 29.63% | 1 | 12.50%
andrea arcangeli | 7 | 25.93% | 3 | 37.50%
xiao guangrong | 1 | 3.70% | 1 | 12.50%
ard biesheuvel | 1 | 3.70% | 1 | 12.50%
dan williams | 1 | 3.70% | 1 | 12.50%
Total | 27 | 100.00% | 8 | 100.00%

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
int vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu;

        if (mutex_lock_killable(&vcpu->mutex))
                return -EINTR;
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
        return 0;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
avi kivity | 25 | 47.17% | 1 | 33.33%
sheng yang | 16 | 30.19% | 1 | 33.33%
michael s. tsirkin | 12 | 22.64% | 1 | 33.33%
Total | 53 | 100.00% | 3 | 100.00%


void vcpu_put(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
avi kivity | 18 | 48.65% | 1 | 25.00%
xiantao zhang | 11 | 29.73% | 1 | 25.00%
marcelo tosatti | 5 | 13.51% | 1 | 25.00%
sheng yang | 3 | 8.11% | 1 | 25.00%
Total | 37 | 100.00% | 4 | 100.00%


static void ack_flush(void *_completed)
{
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
avi kivity | 8 | 88.89% | 1 | 50.00%
sheng yang | 1 | 11.11% | 1 | 50.00%
Total | 9 | 100.00% | 2 | 100.00%


bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
        int i, cpu, me;
        cpumask_var_t cpus;
        bool called = true;
        struct kvm_vcpu *vcpu;

        zalloc_cpumask_var(&cpus, GFP_ATOMIC);

        me = get_cpu();
        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_make_request(req, vcpu);
                cpu = vcpu->cpu;

                /* Set ->requests bit before we read ->mode. */
                smp_mb__after_atomic();

                if (cpus != NULL && cpu != -1 && cpu != me &&
                    kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
                        cpumask_set_cpu(cpu, cpus);
        }
        if (unlikely(cpus == NULL))
                smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
        else if (!cpumask_empty(cpus))
                smp_call_function_many(cpus, ack_flush, NULL, 1);
        else
                called = false;
        put_cpu();
        free_cpumask_var(cpus);
        return called;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
avi kivity | 86 | 54.09% | 1 | 9.09%
sheng yang | 32 | 20.13% | 3 | 27.27%
xiantao zhang | 16 | 10.06% | 1 | 9.09%
xiao guangrong | 14 | 8.81% | 2 | 18.18%
marcelo tosatti | 6 | 3.77% | 1 | 9.09%
mark mcloughlin | 2 | 1.26% | 1 | 9.09%
lan tianyu | 2 | 1.26% | 1 | 9.09%
tang chen | 1 | 0.63% | 1 | 9.09%
Total | 159 | 100.00% | 11 | 100.00%

#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        /*
         * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
         * kvm_make_all_cpus_request.
         */
        long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);

        /*
         * We want to publish modifications to the page tables before reading
         * mode. Pairs with a memory barrier in arch-specific code.
         * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
         * and smp_mb in walk_shadow_page_lockless_begin/end.
         * - powerpc: smp_mb in kvmppc_prepare_to_enter.
         *
         * There is already an smp_mb__after_atomic() before
         * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
         * barrier here.
         */
        if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                ++kvm->stat.remote_tlb_flush;
        cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
xiao guangrong | 19 | 37.25% | 2 | 22.22%
xiantao zhang | 12 | 23.53% | 1 | 11.11%
avi kivity | 8 | 15.69% | 1 | 11.11%
lan tianyu | 6 | 11.76% | 1 | 11.11%
marcelo tosatti | 3 | 5.88% | 1 | 11.11%
mark mcloughlin | 1 | 1.96% | 1 | 11.11%
sheng yang | 1 | 1.96% | 1 | 11.11%
tang chen | 1 | 1.96% | 1 | 11.11%
Total | 51 | 100.00% | 9 | 100.00%

EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif

void kvm_reload_remote_mmus(struct kvm *kvm)
{
        kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
xiantao zhang | 8 | 47.06% | 1 | 20.00%
sheng yang | 6 | 35.29% | 2 | 40.00%
avi kivity | 2 | 11.76% | 1 | 20.00%
tang chen | 1 | 5.88% | 1 | 20.00%
Total | 17 | 100.00% | 5 | 100.00%


int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
        struct page *page;
        int r;

        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        vcpu->pid = NULL;
        init_swait_queue_head(&vcpu->wq);
        kvm_async_pf_vcpu_init(vcpu);

        vcpu->pre_pcpu = -1;
        INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->run = page_address(page);

        kvm_vcpu_set_in_spin_loop(vcpu, false);
        kvm_vcpu_set_dy_eligible(vcpu, false);
        vcpu->preempted = false;

        r = kvm_arch_vcpu_init(vcpu);
        if (r < 0)
                goto fail_free_run;
        return 0;

fail_free_run:
        free_page((unsigned long)vcpu->run);
fail:
        return r;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
avi kivity | 75 | 42.37% | 1 | 7.14%
sheng yang | 35 | 19.77% | 3 | 21.43%
raghavendra k t* (same as raghavendra koushik) | 20 | 11.30% | 2 | 14.29%
feng wu | 15 | 8.47% | 1 | 7.14%
mark mcloughlin | 10 | 5.65% | 2 | 14.29%
xiantao zhang | 6 | 3.39% | 1 | 7.14%
rik van riel | 6 | 3.39% | 1 | 7.14%
gleb natapov | 5 | 2.82% | 1 | 7.14%
marcelo tosatti | 5 | 2.82% | 2 | 14.29%
Total | 177 | 100.00% | 14 | 100.00%

EXPORT_SYMBOL_GPL(kvm_vcpu_init);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        put_pid(vcpu->pid);
        kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
avi kivity | 13 | 39.39% | 1 | 25.00%
sheng yang | 12 | 36.36% | 1 | 25.00%
rik van riel | 7 | 21.21% | 1 | 25.00%
xiantao zhang | 1 | 3.03% | 1 | 25.00%
Total | 33 | 100.00% | 4 | 100.00%

EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
        return container_of(mn, struct kvm, mmu_notifier);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
sheng yang | 13 | 52.00% | 2 | 50.00%
avi kivity | 11 | 44.00% | 1 | 25.00%
mark mcloughlin | 1 | 4.00% | 1 | 25.00%
Total | 25 | 100.00% | 4 | 100.00%


static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long address)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush, idx;

        /*
         * When ->invalidate_page runs, the linux pte has been zapped
         * already but the page is still allocated until
         * ->invalidate_page returns. So if we increase the sequence
         * here the kvm page fault will notice if the spte can't be
         * established because the page is going to be freed. If
         * instead the kvm page fault establishes the spte before
         * ->invalidate_page runs, kvm_unmap_hva will release it
         * before returning.
         *
         * The sequence increase only need to be seen at spin_unlock
         * time, and not at spin_lock time.
         *
         * Increasing the sequence after the spin_unlock would be
         * unsafe because the kvm page fault could then establish the
         * pte after kvm_unmap_hva returned, without noticing the page
         * is going to be freed.
         */
        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);

        kvm->mmu_notifier_seq++;
        need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
        /* we've to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);

        spin_unlock(&kvm->mmu_lock);

        kvm_arch_mmu_notifier_invalidate_page(kvm, address);

        srcu_read_unlock(&kvm->srcu, idx);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
avi kivity | 32 | 29.91% | 1 | 11.11%
marcelo tosatti | 22 | 20.56% | 1 | 11.11%
xiantao zhang | 15 | 14.02% | 1 | 11.11%
mark mcloughlin | 11 | 10.28% | 1 | 11.11%
takuya yoshikawa | 10 | 9.35% | 1 | 11.11%
tang chen | 7 | 6.54% | 1 | 11.11%
sheng yang | 6 | 5.61% | 2 | 22.22%
xiao guangrong | 4 | 3.74% | 1 | 11.11%
Total | 107 | 100.00% | 9 | 100.00%


static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long address,
                                        pte_t pte)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
        kvm->mmu_notifier_seq++;
        kvm_set_spte_hva(kvm, address, pte);
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
avi kivity | 30 | 34.88% | 1 | 25.00%
marcelo tosatti | 23 | 26.74% | 1 | 25.00%
xiantao zhang | 21 | 24.42% | 1 | 25.00%
sheng yang | 12 | 13.95% | 1 | 25.00%
Total | 86 | 100.00% | 4 | 100.00%


static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush = 0, idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
        /*
         * The count increase must become visible at unlock time as no
         * spte can be established without taking the mmu_lock and
         * count is also read inside the mmu_lock critical section.
         */
        kvm->mmu_notifier_count++;
        need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
        need_tlb_flush |= kvm->tlbs_dirty;
        /* we've to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);

        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
avi kivity | 37 | 33.64% | 1 | 10.00%
marcelo tosatti | 22 | 20.00% | 1 | 10.00%
sheng yang | 17 | 15.45% | 3 | 30.00%
takuya yoshikawa | 14 | 12.73% | 2 | 20.00%
xiantao zhang | 10 | 9.09% | 1 | 10.00%
xiao guangrong | 6 | 5.45% | 1 | 10.00%
mark mcloughlin | 4 | 3.64% | 1 | 10.00%
Total | 110 | 100.00% | 10 | 100.00%


static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                                  struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);

        spin_lock(&kvm->mmu_lock);
        /*
         * This sequence increase will notify the kvm page fault that
         * the page that is going to be mapped in the spte could have
         * been freed.
         */
        kvm->mmu_notifier_seq++;
        smp_wmb();
        /*
         * The above sequence increase must be visible before the
         * below count decrease, which is ensured by the smp_wmb above
         * in conjunction with the smp_rmb in mmu_notifier_retry().
         */
        kvm->mmu_notifier_count--;
        spin_unlock(&kvm->mmu_lock);

        BUG_ON(kvm->mmu_notifier_count < 0);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
avi kivity | 37 | 50.00% | 1 | 16.67%
sheng yang | 28 | 37.84% | 3 | 50.00%
xiantao zhang | 5 | 6.76% | 1 | 16.67%
paul mackerras | 4 | 5.41% | 1 | 16.67%
Total | 74 | 100.00% | 6 | 100.00%


static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
                                              struct mm_struct *mm,
                                              unsigned long start,
                                              unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int young, idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);

        young = kvm_age_hva(kvm, start, end);
        if (young)
                kvm_flush_remote_tlbs(kvm);

        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);

        return young;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
sheng yang | 30 | 30.61% | 3 | 42.86%
avi kivity | 29 | 29.59% | 1 | 14.29%
marcelo tosatti | 22 | 22.45% | 1 | 14.29%
takuya yoshikawa | 9 | 9.18% | 1 | 14.29%
andres lagar-cavilla | 8 | 8.16% | 1 | 14.29%
Total | 98 | 100.00% | 7 | 100.00%


static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long start,
                                        unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int young, idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
        /*
         * Even though we do not flush TLB, this will still adversely
         * affect performance on pre-Haswell Intel EPT, where there is
         * no EPT Access Bit to clear so that we have to tear down EPT
         * tables instead. If we find this unacceptable, we can always
         * add a parameter to kvm_age_hva so that it effectively doesn't
         * do anything on clear_young.
         *
         * Also note that currently we never issue secondary TLB flushes
         * from clear_young, leaving this job up to the regular system
         * cadence. If we find this inaccurate, we might come up with a
         * more sophisticated heuristic later.
         */
        young = kvm_age_hva(kvm, start, end);
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);

        return young;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
vladimir davydov | 90 | 100.00% | 1 | 100.00%
Total | 90 | 100.00% | 1 | 100.00%


static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
                                       struct mm_struct *mm,
                                       unsigned long address)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int young, idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
        young = kvm_test_age_hva(kvm, address);
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);

        return young;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
andrea arcangeli | 83 | 100.00% | 1 | 100.00%
Total | 83 | 100.00% | 1 | 100.00%


static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
                                     struct mm_struct *mm)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int idx;

        idx = srcu_read_lock(&kvm->srcu);
        kvm_arch_flush_shadow_all(kvm);
        srcu_read_unlock(&kvm->srcu, idx);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
lai jiangshan | 23 | 42.59% | 1 | 14.29%
sheng yang | 13 | 24.07% | 3 | 42.86%
avi kivity | 11 | 20.37% | 1 | 14.29%
xiantao zhang | 6 | 11.11% | 1 | 14.29%
marcelo tosatti | 1 | 1.85% | 1 | 14.29%
Total | 54 | 100.00% | 7 | 100.00%

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
        .invalidate_page        = kvm_mmu_notifier_invalidate_page,
        .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
        .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
        .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
        .clear_young            = kvm_mmu_notifier_clear_young,
        .test_young             = kvm_mmu_notifier_test_young,
        .change_pte             = kvm_mmu_notifier_change_pte,
        .release                = kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
        kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
        return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
avi kivity | 33 | 100.00% | 1 | 100.00%
Total | 33 | 100.00% | 1 | 100.00%

#else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
static int kvm_init_mmu_notifier(struct kvm *kvm)
{
        return 0;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
avi kivity | 14 | 100.00% | 1 | 100.00%
Total | 14 | 100.00% | 1 | 100.00%

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
static struct kvm_memslots *kvm_alloc_memslots(void)
{
        int i;
        struct kvm_memslots *slots;

        slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
        if (!slots)
                return NULL;

        /*
         * Init kvm generation close to the maximum to easily test the
         * code of handling generation number wrap-around.
         */
        slots->generation = -150;
        for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
                slots->id_to_index[i] = slots->memslots[i].id = i;

        return slots;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
xiao guangrong | 44 | 55.70% | 2 | 66.67%
paolo bonzini | 35 | 44.30% | 1 | 33.33%
Total | 79 | 100.00% | 3 | 100.00%


static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
        if (!memslot->dirty_bitmap)
                return;

        kvfree(memslot->dirty_bitmap);
        memslot->dirty_bitmap = NULL;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
paolo bonzini | 32 | 100.00% | 1 | 100.00%
Total | 32 | 100.00% | 1 | 100.00%

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                             struct kvm_memory_slot *dont)
{
        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                kvm_destroy_dirty_bitmap(free);

        kvm_arch_free_memslot(kvm, free, dont);

        free->npages = 0;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
paolo bonzini | 54 | 100.00% | 1 | 100.00%
Total | 54 | 100.00% | 1 | 100.00%


static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
        struct kvm_memory_slot *memslot;

        if (!slots)
                return;

        kvm_for_each_memslot(memslot, slots)
                kvm_free_memslot(kvm, memslot, NULL);

        kvfree(slots);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
paolo bonzini | 46 | 97.87% | 1 | 50.00%
xiao guangrong | 1 | 2.13% | 1 | 50.00%
Total | 47 | 100.00% | 2 | 100.00%


static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
        int i;

        if (!kvm->debugfs_dentry)
                return;

        debugfs_remove_recursive(kvm->debugfs_dentry);

        for (i = 0; i < kvm_debugfs_num_entries; i++)
                kfree(kvm->debugfs_stat_data[i]);
        kfree(kvm->debugfs_stat_data);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
janosch frank | 59 | 100.00% | 1 | 100.00%
Total | 59 | 100.00% | 1 | 100.00%


static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
{
        char dir_name[ITOA_MAX_LEN * 2];
        struct kvm_stat_data *stat_data;
        struct kvm_stats_debugfs_item *p;

        if (!debugfs_initialized())
                return 0;

        snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
        kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir);
        if (!kvm->debugfs_dentry)
                return -ENOMEM;

        kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
                                         sizeof(*kvm->debugfs_stat_data),
                                         GFP_KERNEL);
        if (!kvm->debugfs_stat_data)
                return -ENOMEM;

        for (p = debugfs_entries; p->name; p++) {
                stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL);
                if (!stat_data)
                        return -ENOMEM;

                stat_data->kvm = kvm;
                stat_data->offset = p->offset;
                kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
                if (!debugfs_create_file(p->name, 0444,
                                         kvm->debugfs_dentry,
                                         stat_data,
                                         stat_fops_per_vm[p->kind]))
                        return -ENOMEM;
        }
        return 0;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
janosch frank | 206 | 100.00% | 1 | 100.00%
Total | 206 | 100.00% | 1 | 100.00%


static struct kvm *kvm_create_vm(unsigned long type)
{
        int r, i;
        struct kvm *kvm = kvm_arch_alloc_vm();

        if (!kvm)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&kvm->mmu_lock);
        atomic_inc(&current->mm->mm_count);
        kvm->mm = current->mm;
        kvm_eventfd_init(kvm);
        mutex_init(&kvm->lock);
        mutex_init(&kvm->irq_lock);
        mutex_init(&kvm->slots_lock);
        atomic_set(&kvm->users_count, 1);
        INIT_LIST_HEAD(&kvm->devices);

        r = kvm_arch_init_vm(kvm, type);
        if (r)
                goto out_err_no_disable;

        r = hardware_enable_all();
        if (r)
                goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
        INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

        BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

        r = -ENOMEM;
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
                kvm->memslots[i] = kvm_alloc_memslots();
                if (!kvm->memslots[i])
                        goto out_err_no_srcu;
        }

        if (init_srcu_struct(&kvm->srcu))
                goto out_err_no_srcu;
        if (init_srcu_struct(&kvm->irq_srcu))
                goto out_err_no_irq_srcu;
        for (i = 0; i < KVM_NR_BUSES; i++) {
                kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
                                        GFP_KERNEL);
                if (!kvm->buses[i])
                        goto out_err;
        }

        r = kvm_init_mmu_notifier(kvm);
        if (r)
                goto out_err;

        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);

        preempt_notifier_inc();

        return kvm;

out_err:
        cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
        cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
        hardware_disable_all();
out_err_no_disable:
        for (i = 0; i < KVM_NR_BUSES; i++)
                kfree(kvm->buses[i]);
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
                kvm_free_memslots(kvm, kvm->memslots[i]);
        kvm_arch_free_vm(kvm);
        mmdrop(current->mm);
        return ERR_PTR(r);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
paolo bonzini | 125 | 30.56% | 5 | 23.81%
marcelo tosatti | 108 | 26.41% | 3 | 14.29%
sheng yang | 42 | 10.27% | 3 | 14.29%
christian borntraeger | 29 | 7.09% | 1 | 4.76%
jan kiszka | 28 | 6.85% | 2 | 9.52%
avi kivity | 18 | 4.40% | 1 | 4.76%
alexander graf | 18 | 4.40% | 1 | 4.76%
mike waychison | 14 | 3.42% | 1 | 4.76%
xiantao zhang | 12 | 2.93% | 1 | 4.76%
alex williamson | 7 | 1.71% | 1 | 4.76%
carsten otte | 5 | 1.22% | 1 | 4.76%
peter zijlstra | 3 | 0.73% | 1 | 4.76%
Total | 409 | 100.00% | 21 | 100.00%

/*
 * Avoid using vmalloc for a small buffer.
 * Should not be used when the size is statically known.
 */
void *kvm_kvzalloc(unsigned long size)
{
        if (size > PAGE_SIZE)
                return vzalloc(size);
        else
                return kzalloc(size, GFP_KERNEL);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
takuya yoshikawa | 31 | 100.00% | 2 | 100.00%
Total | 31 | 100.00% | 2 | 100.00%


static void kvm_destroy_devices(struct kvm *kvm)
{
        struct kvm_device *dev, *tmp;

        list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
                list_del(&dev->vm_node);
                dev->ops->destroy(dev);
        }
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
scott wood | 38 | 79.17% | 1 | 50.00%
geliang tang | 10 | 20.83% | 1 | 50.00%
Total | 48 | 100.00% | 2 | 100.00%


static void kvm_destroy_vm(struct kvm *kvm)
{
        int i;
        struct mm_struct *mm = kvm->mm;

        kvm_destroy_vm_debugfs(kvm);
        kvm_arch_sync_events(kvm);
        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_free_irq_routing(kvm);
        for (i = 0; i < KVM_NR_BUSES; i++)
                kvm_io_bus_destroy(kvm->buses[i]);
        kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        mmu_notifier_unregister(&kvm->