Release 4.14 arch/powerpc/kvm/booke.c
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2007
* Copyright 2010-2011 Freescale Semiconductor, Inc.
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
* Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
* Scott Wood <scottwood@freescale.com>
* Varun Sethi <varun.sethi@freescale.com>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>
#include "timing.h"
#include "booke.h"
#define CREATE_TRACE_POINTS
#include "trace_booke.h"
unsigned long kvmppc_booke_handlers;
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "mmio", VCPU_STAT(mmio_exits) },
{ "sig", VCPU_STAT(signal_exits) },
{ "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
{ "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
{ "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
{ "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
{ "sysc", VCPU_STAT(syscall_exits) },
{ "isi", VCPU_STAT(isi_exits) },
{ "dsi", VCPU_STAT(dsi_exits) },
{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
{ "dec", VCPU_STAT(dec_exits) },
{ "ext_intr", VCPU_STAT(ext_intr_exits) },
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "doorbell", VCPU_STAT(dbell_exits) },
{ "guest doorbell", VCPU_STAT(gdbell_exits) },
{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
{ NULL }
};
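For reference, the VM_STAT()/VCPU_STAT() macros above only produce an offset/type pair, so each row is a stat name plus the location of a counter that the generic KVM debugfs code reads. Mechanically expanding the first entry gives the following (shown for illustration only, not extra code in the file):
{ "mmio", offsetof(struct kvm_vcpu, stat.mmio_exits), KVM_STAT_VCPU },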
/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
int i;
printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
vcpu->arch.shared->srr1);
printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
for (i = 0; i < 32; i += 4) {
printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
kvmppc_get_gpr(vcpu, i),
kvmppc_get_gpr(vcpu, i+1),
kvmppc_get_gpr(vcpu, i+2),
kvmppc_get_gpr(vcpu, i+3));
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hollis Blanchard | 114 | 82.61% | 2 | 40.00% |
Alexander Graf | 24 | 17.39% | 3 | 60.00% |
Total | 138 | 100.00% | 5 | 100.00% |
#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
preempt_disable();
enable_kernel_spe();
kvmppc_save_guest_spe(vcpu);
disable_kernel_spe();
vcpu->arch.shadow_msr &= ~MSR_SPE;
preempt_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Scott Wood | 33 | 91.67% | 1 | 50.00% |
Anton Blanchard | 3 | 8.33% | 1 | 50.00% |
Total | 36 | 100.00% | 2 | 100.00% |
static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
preempt_disable();
enable_kernel_spe();
kvmppc_load_guest_spe(vcpu);
disable_kernel_spe();
vcpu->arch.shadow_msr |= MSR_SPE;
preempt_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Scott Wood | 33 | 91.67% | 1 | 50.00% |
Anton Blanchard | 3 | 8.33% | 1 | 50.00% |
Total | 36 | 100.00% | 2 | 100.00% |
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.shared->msr & MSR_SPE) {
if (!(vcpu->arch.shadow_msr & MSR_SPE))
kvmppc_vcpu_enable_spe(vcpu);
} else if (vcpu->arch.shadow_msr & MSR_SPE) {
kvmppc_vcpu_disable_spe(vcpu);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Scott Wood | 61 | 100.00% | 1 | 100.00% |
Total | 61 | 100.00% | 1 | 100.00% |
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Scott Wood | 10 | 100.00% | 1 | 100.00% |
Total | 10 | 100.00% | 1 | 100.00% |
#endif
/*
* Load up guest vcpu FP state if it's needed.
* It also sets MSR_FP in the thread so that the host knows
* we're holding the FPU, and the host can then save the
* guest vcpu FP state if another thread needs the FPU.
* This simulates an FP unavailable fault.
*
* It must be called with preemption disabled.
*/
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
if (!(current->thread.regs->msr & MSR_FP)) {
enable_kernel_fp();
load_fp_state(&vcpu->arch.fp);
disable_kernel_fp();
current->thread.fp_save_area = &vcpu->arch.fp;
current->thread.regs->msr |= MSR_FP;
}
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mihai Caraman | 70 | 95.89% | 1 | 50.00% |
Anton Blanchard | 3 | 4.11% | 1 | 50.00% |
Total | 73 | 100.00% | 2 | 100.00% |
/*
* Save guest vcpu FP state into the thread.
* It must be called with preemption disabled.
*/
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
if (current->thread.regs->msr & MSR_FP)
giveup_fpu(current);
current->thread.fp_save_area = NULL;
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mihai Caraman | 42 | 100.00% | 1 | 100.00% |
Total | 42 | 100.00% | 1 | 100.00% |
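A minimal sketch of how these two helpers are intended to be paired around a guest entry, using a hypothetical run_guest_once() wrapper (the real run path in this file also handles AltiVec and debug state, and hard-disables interrupts rather than just preemption):
static int run_guest_once(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int ret;

	preempt_disable();              /* both helpers require preemption off */
	kvmppc_load_guest_fp(vcpu);     /* lazily hand the FPU to the guest */
	ret = __kvmppc_vcpu_run(run, vcpu);
	kvmppc_save_guest_fp(vcpu);     /* return FP ownership to the host */
	preempt_enable();

	return ret;
}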
static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
/* We always treat the FP bit as enabled from the host
perspective, so we only need to adjust the shadow MSR */
vcpu->arch.shadow_msr &= ~MSR_FP;
vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexander Graf | 51 | 100.00% | 1 | 100.00% |
Total | 51 | 100.00% | 1 | 100.00% |
/*
* Simulate an AltiVec unavailable fault to load guest state
* from the thread into the AltiVec unit.
* It must be called with preemption disabled.
*/
static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
if (!(current->thread.regs->msr & MSR_VEC)) {
enable_kernel_altivec();
load_vr_state(&vcpu->arch.vr);
disable_kernel_altivec();
current->thread.vr_save_area = &vcpu->arch.vr;
current->thread.regs->msr |= MSR_VEC;
}
}
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mihai Caraman | 79 | 96.34% | 1 | 50.00% |
Anton Blanchard | 3 | 3.66% | 1 | 50.00% |
Total | 82 | 100.00% | 2 | 100.00% |
/*
* Save guest vcpu AltiVec state into the thread.
* It must be called with preemption disabled.
*/
static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
if (current->thread.regs->msr & MSR_VEC)
giveup_altivec(current);
current->thread.vr_save_area = NULL;
}
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mihai Caraman | 51 | 100.00% | 1 | 100.00% |
Total | 51 | 100.00% | 1 | 100.00% |
static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
vcpu->arch.shadow_msr &= ~MSR_DE;
vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif
/* Force enable debug interrupts when user space wants to debug */
if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
/*
* Since there is no shadow MSR, sync MSR_DE into the guest
* visible MSR.
*/
vcpu->arch.shared->msr |= MSR_DE;
#else
vcpu->arch.shadow_msr |= MSR_DE;
vcpu->arch.shared->msr &= ~MSR_DE;
#endif
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bharat Bhushan | 88 | 100.00% | 1 | 100.00% |
Total | 88 | 100.00% | 1 | 100.00% |
/*
* Helper function for "full" MSR writes. No need to call this if only
* EE/CE/ME/DE/RI are changing.
*/
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
u32 old_msr = vcpu->arch.shared->msr;
#ifdef CONFIG_KVM_BOOKE_HV
new_msr |= MSR_GS;
#endif
vcpu->arch.shared->msr = new_msr;
kvmppc_mmu_msr_notify(vcpu, old_msr);
kvmppc_vcpu_sync_spe(vcpu);
kvmppc_vcpu_sync_fpu(vcpu);
kvmppc_vcpu_sync_debug(vcpu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Scott Wood | 44 | 67.69% | 2 | 40.00% |
Liu Yu | 11 | 16.92% | 1 | 20.00% |
Alexander Graf | 5 | 7.69% | 1 | 20.00% |
Bharat Bhushan | 5 | 7.69% | 1 | 20.00% |
Total | 65 | 100.00% | 5 | 100.00% |
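A typical caller is instruction emulation; mtmsr emulation in booke_emulate.c ends up doing roughly the following (a sketch, not a verbatim quote), which is why every full MSR write funnels through the SPE/FPU/debug sync helpers above:
	case OP_31_XOP_MTMSR:
		kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
		break;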
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
unsigned int priority)
{
trace_kvm_booke_queue_irqprio(vcpu, priority);
set_bit(priority, &vcpu->arch.pending_exceptions);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hollis Blanchard | 27 | 79.41% | 2 | 66.67% |
Alexander Graf | 7 | 20.59% | 1 | 33.33% |
Total | 34 | 100.00% | 3 | 100.00% |
void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
ulong dear_flags, ulong esr_flags)
{
vcpu->arch.queued_dear = dear_flags;
vcpu->arch.queued_esr = esr_flags;
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Liu Yu | 39 | 100.00% | 1 | 100.00% |
Total | 39 | 100.00% | 1 | 100.00% |
void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
ulong dear_flags, ulong esr_flags)
{
vcpu->arch.queued_dear = dear_flags;
vcpu->arch.queued_esr = esr_flags;
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Liu Yu | 39 | 100.00% | 1 | 100.00% |
Total | 39 | 100.00% | 1 | 100.00% |
void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
{
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexander Graf | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
{
vcpu->arch.queued_esr = esr_flags;
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Liu Yu | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
ulong esr_flags)
{
vcpu->arch.queued_dear = dear_flags;
vcpu->arch.queued_esr = esr_flags;
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexander Graf | 40 | 100.00% | 1 | 100.00% |
Total | 40 | 100.00% | 1 | 100.00% |
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
vcpu->arch.queued_esr = esr_flags;
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hollis Blanchard | 17 | 60.71% | 2 | 50.00% |
Liu Yu | 9 | 32.14% | 1 | 25.00% |
Alexander Graf | 2 | 7.14% | 1 | 25.00% |
Total | 28 | 100.00% | 4 | 100.00% |
void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mackerras | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hollis Blanchard | 17 | 100.00% | 2 | 100.00% |
Total | 17 | 100.00% | 2 | 100.00% |
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hollis Blanchard | 23 | 100.00% | 2 | 100.00% |
Total | 23 | 100.00% | 2 | 100.00% |
void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexander Graf | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{
unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;
if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;
kvmppc_booke_queue_irqprio(vcpu, prio);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hollis Blanchard | 21 | 52.50% | 2 | 66.67% |
Alexander Graf | 19 | 47.50% | 1 | 33.33% |
Total | 40 | 100.00% | 3 | 100.00% |
void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexander Graf | 34 | 100.00% | 2 | 100.00% |
Total | 34 | 100.00% | 2 | 100.00% |
static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bharat Bhushan | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bharat Bhushan | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
{
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bharat Bhushan | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
{
clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bharat Bhushan | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
kvmppc_set_srr0(vcpu, srr0);
kvmppc_set_srr1(vcpu, srr1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Scott Wood | 15 | 46.88% | 1 | 16.67% |
Hollis Blanchard | 10 | 31.25% | 2 | 33.33% |
Bharat Bhushan | 4 | 12.50% | 1 | 16.67% |
Asias He | 2 | 6.25% | 1 | 16.67% |
Alexander Graf | 1 | 3.12% | 1 | 16.67% |
Total | 32 | 100.00% | 6 | 100.00% |
static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
vcpu->arch.csrr0 = srr0;
vcpu->arch.csrr1 = srr1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Scott Wood | 27 | 79.41% | 1 | 50.00% |
Alexander Graf | 7 | 20.59% | 1 | 50.00% |
Total | 34 | 100.00% | 2 | 100.00% |
static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
vcpu->arch.dsrr0 = srr0;
vcpu->arch.dsrr1 = srr1;
} else {
set_guest_csrr(vcpu, srr0, srr1);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Scott Wood | 45 | 81.82% | 1 | 33.33% |
Alexander Graf | 9 | 16.36% | 1 | 33.33% |
Hollis Blanchard | 1 | 1.82% | 1 | 33.33% |
Total | 55 | 100.00% | 3 | 100.00% |
static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
vcpu->arch.mcsrr0 = srr0;
vcpu->arch.mcsrr1 = srr1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Scott Wood | 34 | 100.00% | 1 | 100.00% |
Total | 34 | 100.00% | 1 | 100.00% |
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
unsigned int priority)
{
int allowed = 0;
ulong msr_mask = 0;
bool update_esr = false, update_dear = false, update_epr = false;
ulong crit_raw = vcpu->arch.shared->critical;
ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
bool crit;
bool keep_irq = false;
enum int_class int_class;
ulong new_msr = vcpu->arch.shared->msr;
/* Truncate crit indicators in 32 bit mode */
if (!(vcpu->arch.shared->msr & MSR_SF)) {
crit_raw &= 0xffffffff;
crit_r1 &= 0xffffffff;
}
/* Critical section when crit == r1 */
crit = (crit_raw == crit_r1);
/* ... and we're in supervisor mode */
crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
priority = BOOKE_IRQPRIO_EXTERNAL;
keep_irq = true;
}
if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
update_epr = true;
switch (priority) {
case BOOKE_IRQPRIO_DTLB_MISS:
case BOOKE_IRQPRIO_DATA_STORAGE:
case BOOKE_IRQPRIO_ALIGNMENT:
update_dear = true;
/* fall through */
case BOOKE_IRQPRIO_INST_STORAGE:
case BOOKE_IRQPRIO_PROGRAM:
update_esr = true;
/* fall through */
case BOOKE_IRQPRIO_ITLB_MISS:
case BOOKE_IRQPRIO_SYSCALL:
case BOOKE_IRQPRIO_FP_UNAVAIL:
#ifdef CONFIG_SPE_POSSIBLE
case BOOKE_IRQPRIO_SPE_UNAVAIL:
case BOOKE_IRQPRIO_SPE_FP_DATA:
case BOOKE_IRQPRIO_SPE_FP_ROUND:
#endif
#ifdef CONFIG_ALTIVEC
case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
#endif
case BOOKE_IRQPRIO_AP_UNAVAIL:
allowed = 1;
msr_mask = MSR_CE | MSR_ME | MSR_DE;
int_class = INT_CLASS_NONCRIT;
break;
case BOOKE_IRQPRIO_WATCHDOG:
case BOOKE_IRQPRIO_CRITICAL:
case BOOKE_IRQPRIO_DBELL_CRIT:
allowed = vcpu->arch.shared->msr & MSR_CE;
allowed = allowed && !crit;
msr_mask = MSR_ME;
int_class = INT_CLASS_CRIT;
break;
case BOOKE_IRQPRIO_MACHINE_CHECK:
allowed = vcpu->arch.shared->msr & MSR_ME;
allowed = allowed && !crit;
int_class = INT_CLASS_MC;
break;
case BOOKE_IRQPRIO_DECREMENTER:
case BOOKE_IRQPRIO_FIT:
keep_irq = true;
/* fall through */
case BOOKE_IRQPRIO_EXTERNAL:
case BOOKE_IRQPRIO_DBELL:
allowed = vcpu->arch.shared->msr & MSR_EE;
allowed = allowed && !crit;
msr_mask = MSR_CE | MSR_ME | MSR_DE;
int_class = INT_CLASS_NONCRIT;
break;
case BOOKE_IRQPRIO_DEBUG:
allowed = vcpu->arch.shared->msr & MSR_DE;
allowed = allowed && !crit;
msr_mask = MSR_ME;
if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
int_class = INT_CLASS_DBG;
else
int_class = INT_CLASS_CRIT;
break;
}
if (allowed) {
switch (int_class) {
case INT_CLASS_NONCRIT:
set_guest_srr(vcpu, vcpu->arch.pc,
vcpu->arch.shared->msr);
break;
case INT_CLASS_CRIT:
set_guest_csrr(vcpu, vcpu->arch.pc,
vcpu->arch.shared->msr);
break;
case INT_CLASS_DBG:
set_guest_dsrr(vcpu, vcpu->arch.pc,
vcpu->arch.shared->msr);
break;
case INT_CLASS_MC:
set_guest_mcsrr(vcpu, vcpu->arch.pc,
vcpu->arch.shared->msr);
break;
}
vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
if (update_esr == true)
kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
if (update_dear == true)
kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
if (update_epr == true) {
if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
kvmppc_mpic_set_epr(vcpu);
}
}
new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
if (vcpu->arch.epcr & SPRN_EPCR_ICM)
new_msr |= MSR_CM;
#endif
kvmppc_set_msr(vcpu, new_msr);
if (!keep_irq)
clear_bit(priority, &vcpu->arch.pending_exceptions);
}
#ifdef CONFIG_KVM_BOOKE_HV
/*
* If an interrupt is pending but masked, raise a guest doorbell
* so that we are notified when the guest enables the relevant
* MSR bit.
*/
if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif
return allowed;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Scott Wood | 322 | 43.93% | 4 | 19.05% |
Hollis Blanchard | 155 | 21.15% | 3 | 14.29% |
Bharat Bhushan | 123 | 16.78% | 4 | 19.05% |
Alexander Graf | 66 | 9.00% | 7 | 33.33% |
Mihai Caraman | 52 | 7.09% | 2 | 9.52% |
Liu Yu | 15 | 2.05% | 1 | 4.76% |
Total | 733 | 100.00% | 21 | 100.00% |
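The crit check near the top of kvmppc_booke_irqprio_deliver implements the shared (magic) page critical-section protocol: delivery of maskable interrupts is suppressed while the guest's r1 matches vcpu->arch.shared->critical and the guest is in supervisor mode. A guest-side sketch, with hypothetical names (shared_page, current_r1) since the real paravirt wrappers live in the guest kernel, not in this file:
	/* Enter a critical section: the host compares this against our r1. */
	shared_page->critical = current_r1();

	/* ... code that must not have interrupts injected ... */

	/* Leave the critical section: any value that is not r1 re-enables delivery. */
	shared_page->critical = ~0;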
/*
* Return the number of jiffies until the next timeout. If the timeout is
* longer than NEXT_TIMER_MAX_DELTA, return NEXT_TIMER_MAX_DELTA, because
* a larger value can break the timer APIs.
*/
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
u64 tb, wdt_tb, wdt_ticks = 0;
u64 nr_jiffies = 0;
u32 period = TCR_GET_WP(vcpu->arch.tcr);
wdt_tb = 1ULL << (63 - period);
tb = get_tb();
/*
* The watchdog timeout will happen when the TB bit corresponding
* to the watchdog period toggles from 0 to 1.
*/
if (tb & wdt_tb)
wdt_ticks = wdt_tb;
wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));
/* Convert timebase ticks to jiffies */
nr_jiffies = wdt_ticks;
if (do_div(nr_jiffies, tb_ticks_per_jiffy))
nr_jiffies++;
return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bharat Bhushan | 105 | 100.00% | 1 | 100.00% |
Total | 105 | 100.00% | 1 | 100.00% |
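The bit arithmetic above is easier to follow with concrete numbers; the values below are made up for illustration and do not come from the source:
/*
 * If TCR[WP] decodes to period = 30, then wdt_tb = 1ULL << (63 - 30),
 * i.e. 2^33 timebase ticks.  With tb = 2^30 (the 2^33 bit is still 0),
 * wdt_ticks = 2^33 - 2^30 ticks remain until that bit flips to 1, and
 * nr_jiffies is that count divided by tb_ticks_per_jiffy (rounded up),
 * capped at NEXT_TIMER_MAX_DELTA.
 */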
static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
unsigned long nr_jiffies;
unsigned long flags;
/*
* If TSR_ENW and TSR_WIS are not both set, there is no need to exit
* to userspace, so clear the KVM_REQ_WATCHDOG request.
*/
if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);
spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
nr_jiffies = watchdog_next_timeout(vcpu);
/*
* If the watchdog timeout is NEXT_TIMER_MAX_DELTA jiffies or more,
* do not arm the timer, as such a large value can break the timer APIs.
*/
if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
else
del_timer(&vcpu->arch.wdt_timer);
spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bharat Bhushan | 111 | 99.11% | 1 | 50.00% |
Radim Krčmář | 1 | 0.89% | 1 | 50.00% |
Total | 112 | 100.00% | 2 | 100.00% |
void kvmppc_watchdog_func(unsigned long data)
{
struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
u32 tsr, new_tsr;
int final;
do {
new_tsr = tsr = vcpu->arch.tsr;
final = 0;
/* Time out event */
if (tsr & TSR_ENW) {
if (tsr & TSR_WIS)
final = 1;
else
new_tsr = tsr | TSR_WIS;
} else {
new_tsr = tsr | TSR_ENW;
}
} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);
if (new_tsr & TSR_WIS) {
smp_wmb();
kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
kvm_vcpu_kick(vcpu);
}
/*
* If this is the final watchdog expiry and some action is required,
* exit to userspace.
*/
if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
vcpu->arch.watchdog_enabled) {
smp_wmb();
kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
kvm_vcpu_kick(vcpu);
}
/*
* Stop running the watchdog timer after the final expiration to
* prevent the host from being flooded with timers if the
* guest sets a short period.
* Timers will resume the next time TSR/TCR is updated.
*/
if (!final)
arm_next_watchdog(vcpu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bharat Bhushan | 132 | 76.74% | 1 | 16.67% |
Scott Wood | 24 | 13.95% | 1 | 16.67% |
Hollis Blanchard | 9 | 5.23% | 2 | 33.33% |
Liu Yu | 5 | 2.91% | 1 | 16.67% |
Alexander Graf | 2 | 1.16% | 1 | 16.67% |
Total | 172 | 100.00% | 6 | 100.00% |
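Read as a state machine over TSR (assuming no concurrent TSR writes), the cmpxchg() loop above walks through the usual Book E watchdog sequence:
/*
 * expiry 1: ENW=0         -> set ENW   (nothing else happens yet)
 * expiry 2: ENW=1, WIS=0  -> set WIS   (KVM_REQ_PENDING_TIMER; the guest sees
 *                                       a watchdog interrupt if TCR[WIE] is set)
 * expiry 3: ENW=1, WIS=1  -> final     (KVM_REQ_WATCHDOG if TCR[WRC] != 0 and
 *                                       watchdog_enabled; the timer is not
 *                                       re-armed)
 */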
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
kvmppc_core_queue_dec(vcpu);
else
kvmppc_core_dequeue_dec(vcpu);
if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
kvmppc_core_queue_watchdog(vcpu);
else
kvmppc_core_dequeue_watchdog(vcpu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Scott Wood | 44 | 57.14% | 1 | 50.00% |
Bharat Bhushan | 33 | 42.86% | 1 | 50.00% |
Total | 77 | 100.00% | 2 | 100.00% |
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
unsigned long *pending = &vcpu->arch.pending_exceptions;
unsigned int priority;
priority = __ffs(*pending);
while (priority < BOOKE_IRQPRIO_MAX) {
if (kvmppc_booke_irqprio_deliver(vcpu, priority))
break;
priority = find_next_bit(pending,
BITS_PER_BYTE * sizeof(*pending),
priority + 1);
}
/* Tell the guest about our interrupt status */
vcpu->arch.shared->int_pending = !!*pending;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hollis Blanchard | 69 | 80.23% | 4 | 50.00% |
Alexander Graf | 11 | 12.79% | 2 | 25.00% |
Scott Wood | 6 | 6.98% | 2 | 25.00% |
Total | 86 | 100.00% | 8 | 100.00% |
/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
int r = 0;
WARN_ON_ONCE(!irqs_disabled());
kvmppc_core_check_exceptions(vcpu);
if (kvm_request_pending(vcpu)) {
/* Exception delivery raised request; start over */
return 1;
}
if (vcpu->arch.shared->msr & MSR_WE) {
local_irq_enable();
kvm_vcpu_block(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
hard_irq_disable();
kvmppc_set_exit_type(vcpu,