Release 4.11 virt/kvm/async_pf.c
/*
* kvm asynchronous fault support
*
* Copyright 2010 Red Hat, Inc.
*
* Author:
* Gleb Natapov <gleb@redhat.com>
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>
#include "async_pf.h"
#include <trace/events/kvm.h>
/*
 * Notify the guest that the faulted page is present, synchronously from
 * the async worker thread.  Compiled to a no-op unless the architecture
 * selects CONFIG_KVM_ASYNC_PF_SYNC (i.e. it wants "page present"
 * delivered from the worker rather than from vcpu context).
 */
static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
					       struct kvm_async_pf *work)
{
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
	kvm_arch_async_page_present(vcpu, work);
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dominik Dingel | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
/*
 * Notify the guest that the faulted page is present, from vcpu context
 * (kvm_check_async_pf_completion()).  Complement of
 * kvm_async_page_present_sync(): compiled to a no-op when
 * CONFIG_KVM_ASYNC_PF_SYNC is set, since the worker already notified.
 */
static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
						struct kvm_async_pf *work)
{
#ifndef CONFIG_KVM_ASYNC_PF_SYNC
	kvm_arch_async_page_present(vcpu, work);
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dominik Dingel | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
/* Slab cache from which all struct kvm_async_pf work items are allocated. */
static struct kmem_cache *async_pf_cache;
/*
 * Create the slab cache backing async page fault work items.
 *
 * Returns 0 on success, -ENOMEM if the cache could not be created.
 */
int kvm_async_pf_init(void)
{
	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

	return async_pf_cache ? 0 : -ENOMEM;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gleb Natapov | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
/*
 * Destroy the slab cache created by kvm_async_pf_init().  All work
 * items must have been freed by the time this runs.
 */
void kvm_async_pf_deinit(void)
{
	kmem_cache_destroy(async_pf_cache);
	/* clear the stale pointer in case init/deinit is cycled again */
	async_pf_cache = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gleb Natapov | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
/*
 * Initialize the per-vcpu async page fault state: the lock guarding the
 * completion list, plus the (initially empty) "queue" and "done" lists.
 * The three initializations are independent of one another.
 */
void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
	spin_lock_init(&vcpu->async_pf.lock);
	INIT_LIST_HEAD(&vcpu->async_pf.queue);
	INIT_LIST_HEAD(&vcpu->async_pf.done);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gleb Natapov | 40 | 100.00% | 1 | 100.00% |
Total | 40 | 100.00% | 1 | 100.00% |
/*
 * Workqueue callback: fault in the page the guest touched while it was
 * not resident, then move the work item to the vcpu's "done" list so
 * kvm_check_async_pf_completion() can inject the completion.  Runs
 * outside vcpu context, against a remote mm.
 */
static void async_pf_execute(struct work_struct *work)
{
	struct kvm_async_pf *apf =
		container_of(work, struct kvm_async_pf, work);
	struct mm_struct *mm = apf->mm;
	struct kvm_vcpu *vcpu = apf->vcpu;
	unsigned long addr = apf->addr;
	gva_t gva = apf->gva;
	int locked = 1;

	might_sleep();

	/*
	 * This work is run asynchronously to the task which owns
	 * mm and might be done in another context, so we must
	 * access remotely.
	 */
	down_read(&mm->mmap_sem);
	get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
			&locked);
	/* gup may have dropped mmap_sem (locked cleared); don't unlock twice */
	if (locked)
		up_read(&mm->mmap_sem);

	/* no-op unless CONFIG_KVM_ASYNC_PF_SYNC */
	kvm_async_page_present_sync(vcpu, apf);

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&apf->link, &vcpu->async_pf.done);
	/* NULL vcpu marks the item "already completed" for the cancel path */
	apf->vcpu = NULL;
	spin_unlock(&vcpu->async_pf.lock);

	/*
	 * apf may be freed by kvm_check_async_pf_completion() after
	 * this point
	 */
	trace_kvm_async_pf_completed(addr, gva);

	/*
	 * This memory barrier pairs with prepare_to_wait's set_current_state()
	 */
	smp_mb();
	if (swait_active(&vcpu->wq))
		swake_up(&vcpu->wq);

	/* drop the mm and VM references taken in kvm_setup_async_pf() */
	mmput(mm);
	kvm_put_kvm(vcpu->kvm);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gleb Natapov | 143 | 72.22% | 1 | 11.11% |
Lorenzo Stoakes | 33 | 16.67% | 1 | 11.11% |
Dominik Dingel | 7 | 3.54% | 1 | 11.11% |
Paolo Bonzini | 6 | 3.03% | 1 | 11.11% |
Kosuke Tatsukawa | 4 | 2.02% | 1 | 11.11% |
Oleg Nesterov | 2 | 1.01% | 2 | 22.22% |
Marcelo Tosatti | 2 | 1.01% | 1 | 11.11% |
Dave Hansen | 1 | 0.51% | 1 | 11.11% |
Total | 198 | 100.00% | 9 | 100.00% |
/*
 * Cancel or drain all outstanding async page faults for @vcpu and free
 * every work item, leaving both lists empty and the queued count zero.
 * Called when the queue must be torn down (e.g. vcpu reset/destroy).
 */
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->async_pf.lock);

	/* cancel outstanding work queue item */
	while (!list_empty(&vcpu->async_pf.queue)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.queue,
					 typeof(*work), queue);
		list_del(&work->queue);

		/*
		 * We know it's present in vcpu->async_pf.done, do
		 * nothing here.
		 */
		if (!work->vcpu)
			continue;

		/*
		 * Drop the lock: flush/cancel below may sleep, and the
		 * worker itself takes async_pf.lock to complete the item.
		 */
		spin_unlock(&vcpu->async_pf.lock);
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
		/* sync archs notify from the worker; let it finish */
		flush_work(&work->work);
#else
		/*
		 * If cancellation succeeded the worker never ran, so we
		 * must drop the references (and free) on its behalf.
		 */
		if (cancel_work_sync(&work->work)) {
			mmput(work->mm);
			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
			kmem_cache_free(async_pf_cache, work);
		}
#endif
		spin_lock(&vcpu->async_pf.lock);
	}

	/* free everything that completed but was never injected */
	while (!list_empty(&vcpu->async_pf.done)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.done,
					 typeof(*work), link);
		list_del(&work->link);
		kmem_cache_free(async_pf_cache, work);
	}
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gleb Natapov | 146 | 67.59% | 1 | 14.29% |
Paolo Bonzini | 30 | 13.89% | 1 | 14.29% |
Radim Krčmář | 20 | 9.26% | 2 | 28.57% |
Dominik Dingel | 15 | 6.94% | 1 | 14.29% |
Geliang Tang | 4 | 1.85% | 1 | 14.29% |
Oleg Nesterov | 1 | 0.46% | 1 | 14.29% |
Total | 216 | 100.00% | 7 | 100.00% |
/*
 * Inject completed async page faults into the guest, one work item at a
 * time, for as long as the architecture allows injection.  Runs in vcpu
 * context; frees each item after injection.
 */
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	/* lockless peek at "done"; the entry is revalidated under the lock */
	while (!list_empty_careful(&vcpu->async_pf.done) &&
	       kvm_arch_can_inject_async_page_present(vcpu)) {
		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
					      link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		kvm_arch_async_page_ready(vcpu, work);
		/* no-op when CONFIG_KVM_ASYNC_PF_SYNC (worker already notified) */
		kvm_async_page_present_async(vcpu, work);

		/*
		 * NOTE(review): ->queue is unlinked without the lock; it
		 * appears to be touched only from vcpu context, and
		 * wakeup_all items carry a self-linked ->queue so
		 * list_del() is safe on them — confirm against callers.
		 */
		list_del(&work->queue);
		vcpu->async_pf.queued--;
		kmem_cache_free(async_pf_cache, work);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gleb Natapov | 113 | 94.96% | 3 | 60.00% |
Xiao Guangrong | 5 | 4.20% | 1 | 20.00% |
Dominik Dingel | 1 | 0.84% | 1 | 20.00% |
Total | 119 | 100.00% | 5 | 100.00% |
/*
 * Arrange for a guest fault at @gva (host address @hva) to be resolved
 * asynchronously: allocate a work item, pin the current mm and the VM,
 * and hand the item to the system workqueue.
 *
 * Returns 1 if the fault will be handled asynchronously (the guest has
 * been told "page not present"), 0 if the caller must fall back to a
 * synchronous fault (queue full, allocation failure, or bad hva).
 */
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch)
{
	struct kvm_async_pf *work;

	/* cap the number of outstanding items per vcpu */
	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
		return 0;

	/* setup delayed work */

	/*
	 * do alloc nowait since if we are going to sleep anyway we
	 * may as well sleep faulting in page
	 */
	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
	if (!work)
		return 0;

	work->wakeup_all = false;
	work->vcpu = vcpu;
	work->gva = gva;
	work->addr = hva;
	work->arch = *arch;
	/* keep the mm and the VM alive until async_pf_execute() drops them */
	work->mm = current->mm;
	mmget(work->mm);
	kvm_get_kvm(work->vcpu->kvm);

	/*
	 * this can't really happen otherwise gfn_to_pfn_async
	 * would succeed
	 */
	if (unlikely(kvm_is_error_hva(work->addr)))
		goto retry_sync;

	INIT_WORK(&work->work, async_pf_execute);
	if (!schedule_work(&work->work))
		goto retry_sync;

	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
	/* tell the guest the page is not there yet */
	kvm_arch_async_page_not_present(vcpu, work);
	return 1;
retry_sync:
	/* undo the references taken above; caller handles the fault sync */
	kvm_put_kvm(work->vcpu->kvm);
	mmput(work->mm);
	kmem_cache_free(async_pf_cache, work);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gleb Natapov | 206 | 95.37% | 1 | 16.67% |
Dominik Dingel | 4 | 1.85% | 1 | 16.67% |
chai wen | 2 | 0.93% | 1 | 16.67% |
Christian Bornträger | 2 | 0.93% | 1 | 16.67% |
Vegard Nossum | 1 | 0.46% | 1 | 16.67% |
Oleg Nesterov | 1 | 0.46% | 1 | 16.67% |
Total | 216 | 100.00% | 6 | 100.00% |
/*
 * Queue a "wakeup all" pseudo-completion so the guest re-checks every
 * outstanding async page fault.  Returns 0 on success (or if a real
 * completion is already pending, which serves the same purpose),
 * -ENOMEM if the work item could not be allocated.
 */
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	/* a pending completion will already wake the guest; nothing to do */
	if (!list_empty_careful(&vcpu->async_pf.done))
		return 0;

	/* may be called from a context that cannot sleep */
	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	work->wakeup_all = true;
	/* self-link: this item was never on ->queue */
	INIT_LIST_HEAD(&work->queue); /* for list_del to work */

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&work->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	/* balanced by queued-- when the item is injected and freed */
	vcpu->async_pf.queued++;
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gleb Natapov | 86 | 78.90% | 1 | 33.33% |
Xiao Guangrong | 21 | 19.27% | 1 | 33.33% |
chai wen | 2 | 1.83% | 1 | 33.33% |
Total | 109 | 100.00% | 3 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gleb Natapov | 803 | 78.11% | 4 | 17.39% |
Dominik Dingel | 85 | 8.27% | 3 | 13.04% |
Paolo Bonzini | 36 | 3.50% | 1 | 4.35% |
Lorenzo Stoakes | 33 | 3.21% | 1 | 4.35% |
Xiao Guangrong | 26 | 2.53% | 2 | 8.70% |
Radim Krčmář | 20 | 1.95% | 2 | 8.70% |
Geliang Tang | 4 | 0.39% | 1 | 4.35% |
Kosuke Tatsukawa | 4 | 0.39% | 1 | 4.35% |
chai wen | 4 | 0.39% | 1 | 4.35% |
Oleg Nesterov | 4 | 0.39% | 2 | 8.70% |
Ingo Molnar | 3 | 0.29% | 1 | 4.35% |
Christian Bornträger | 2 | 0.19% | 1 | 4.35% |
Marcelo Tosatti | 2 | 0.19% | 1 | 4.35% |
Vegard Nossum | 1 | 0.10% | 1 | 4.35% |
Dave Hansen | 1 | 0.10% | 1 | 4.35% |
Total | 1028 | 100.00% | 23 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.