Release 4.14: arch/mips/kvm/dyntrans.c
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
 * KVM/MIPS: Binary patching of privileged instructions to reduce traps.
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <asm/cacheflush.h>
#include "commpage.h"
/**
 * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
 * @vcpu: Virtual CPU.
 * @opc: PC of instruction to replace.
 * @replace: Instruction to write.
 *
 * Returns: 0 on success, or -EFAULT if @opc cannot be made writable.
 */
static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
                                  union mips_instruction replace)
{
        unsigned long vaddr = (unsigned long)opc;
        int err;

retry:
        /* The GVA page table is still active so use the Linux TLB handlers */
        kvm_trap_emul_gva_lockless_begin(vcpu);
        err = put_user(replace.word, opc);
        kvm_trap_emul_gva_lockless_end(vcpu);

        if (unlikely(err)) {
                /*
                 * We write protect clean pages in the GVA page table so the
                 * normal Linux TLB mod handler doesn't silently dirty the
                 * page. It's also possible we raced with a GVA invalidation.
                 * Try to force the page to become dirty.
                 */
                err = kvm_trap_emul_gva_fault(vcpu, vaddr, true);
                if (unlikely(err)) {
                        kvm_info("%s: Address unwriteable: %p\n",
                                 __func__, opc);
                        return -EFAULT;
                }

                /*
                 * Try again. This will likely trigger a TLB refill, which will
                 * fetch the new dirty entry from the GVA page table, which
                 * should then succeed.
                 */
                goto retry;
        }
        __local_flush_icache_user_range(vaddr, vaddr + 4);

        return 0;
}
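As an aside (not part of the kernel source), the patch operation above boils down to a single aligned 32-bit store followed by an instruction-cache flush of that one word. A minimal user-space analogue of the same sequence, with GCC's __builtin___clear_cache standing in for __local_flush_icache_user_range() and all names chosen purely for illustration, might look like this:

#include <stdint.h>

/* Illustrative sketch only: overwrite one instruction word in a code
 * buffer, then make the change visible to instruction fetch. */
void patch_insn(uint32_t *opc, uint32_t replace)
{
        *opc = replace;         /* single aligned 32-bit store */
        __builtin___clear_cache((char *)opc, (char *)(opc + 1));
}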
Contributors
Person         | Tokens | Token share | Commits | Commit share |
James Hogan    |     99 |      86.09% |       6 |       75.00% |
Sanjay Lal     |     15 |      13.04% |       1 |       12.50% |
Deng-Cheng Zhu |      1 |       0.87% |       1 |       12.50% |
Total          |    115 |     100.00% |       8 |      100.00% |
int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc,
                               struct kvm_vcpu *vcpu)
{
        union mips_instruction nop_inst = { 0 };

        /* Replace the CACHE instruction with a NOP */
        return kvm_mips_trans_replace(vcpu, opc, nop_inst);
}
Contributors
Person      | Tokens | Token share | Commits | Commit share |
James Hogan |     34 |      91.89% |       2 |       66.67% |
Sanjay Lal  |      3 |       8.11% |       1 |       33.33% |
Total       |     37 |     100.00% |       3 |      100.00% |
/*
 * Address-based CACHE instructions are transformed into SYNCIs. That is a
 * little heavy-handed for plain D-cache invalidates, but it avoids an
 * expensive trap.
 */
int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
                            struct kvm_vcpu *vcpu)
{
        union mips_instruction synci_inst = { 0 };

        /* SYNCI lives in the REGIMM (bcond) opcode space, selected by rt */
        synci_inst.i_format.opcode = bcond_op;
        synci_inst.i_format.rs = inst.i_format.rs;
        synci_inst.i_format.rt = synci_op;
        /* MIPS r6 encodes CACHE in the SPEC3 opcode space with a 9-bit offset */
        if (cpu_has_mips_r6)
                synci_inst.i_format.simmediate = inst.spec3_format.simmediate;
        else
                synci_inst.i_format.simmediate = inst.i_format.simmediate;

        return kvm_mips_trans_replace(vcpu, opc, synci_inst);
}
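For concreteness (this snippet is illustrative and not part of the kernel source), the SYNCI word built above can be assembled by hand. The field layout is the standard MIPS I-type opcode:6/rs:5/rt:5/imm:16, with the REGIMM major opcode (bcond_op) being 0x01 and the SYNCI rt code (synci_op) being 0x1f; the base register $a0 and zero offset are chosen for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t regimm = 0x01;         /* REGIMM/bcond major opcode */
        uint32_t synci_rt = 0x1f;       /* SYNCI minor opcode in the rt field */
        uint32_t base = 4;              /* $a0 */
        uint32_t offset = 0;
        uint32_t word = (regimm << 26) | (base << 21) | (synci_rt << 16) |
                        (offset & 0xffff);

        printf("synci 0($a0) = 0x%08x\n", word);        /* prints 0x049f0000 */
        return 0;
}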
Contributors
Person         | Tokens | Token share | Commits | Commit share |
James Hogan    |     58 |      62.37% |       4 |       66.67% |
Sanjay Lal     |     34 |      36.56% |       1 |       16.67% |
Deng-Cheng Zhu |      1 |       1.08% |       1 |       16.67% |
Total          |     93 |     100.00% |       6 |      100.00% |
int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
                        struct kvm_vcpu *vcpu)
{
        union mips_instruction mfc0_inst = { 0 };
        u32 rd, sel;

        rd = inst.c0r_format.rd;
        sel = inst.c0r_format.sel;

        if (rd == MIPS_CP0_ERRCTL && sel == 0) {
                /* ErrCtl reads as constant zero: patch in "add rt, zero, zero" */
                mfc0_inst.r_format.opcode = spec_op;
                mfc0_inst.r_format.rd = inst.c0r_format.rt;
                mfc0_inst.r_format.func = add_op;
        } else {
                /* Otherwise load the register value straight from the commpage */
                mfc0_inst.i_format.opcode = lw_op;
                mfc0_inst.i_format.rt = inst.c0r_format.rt;
                mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
                        offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
#ifdef CONFIG_CPU_BIG_ENDIAN
                /* 64-bit register storage: the low 32-bit word sits at offset +4 */
                if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
                        mfc0_inst.i_format.simmediate |= 4;
#endif
        }

        return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
}
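As a worked example (illustrative, not kernel source), the ErrCtl fast path above turns an instruction such as "mfc0 $t0, $26, 0" into "add $t0, $zero, $zero" (R-type layout opcode:6/rs:5/rt:5/rd:5/sa:5/func:6, SPECIAL = 0, ADD = 0x20). The commpage path instead produces an LW whose immediate depends on KVM_GUEST_COMMPAGE_ADDR and the commpage layout, so it is not reproduced here:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t rd = 8;                /* $t0, the original MFC0 target */
        uint32_t add_func = 0x20;       /* SPECIAL/ADD function code */
        uint32_t word = (rd << 11) | add_func;  /* every other field is 0 */

        printf("add $t0, $zero, $zero = 0x%08x\n", word);       /* 0x00004020 */
        return 0;
}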
Contributors
Person         | Tokens | Token share | Commits | Commit share |
James Hogan    |    102 |      57.30% |       7 |       77.78% |
Sanjay Lal     |     75 |      42.13% |       1 |       11.11% |
Deng-Cheng Zhu |      1 |       0.56% |       1 |       11.11% |
Total          |    178 |     100.00% |       9 |      100.00% |
int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
                        struct kvm_vcpu *vcpu)
{
        union mips_instruction mtc0_inst = { 0 };
        u32 rd, sel;

        rd = inst.c0r_format.rd;
        sel = inst.c0r_format.sel;

        /* Store the register value straight into its commpage slot */
        mtc0_inst.i_format.opcode = sw_op;
        mtc0_inst.i_format.rt = inst.c0r_format.rt;
        mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
                offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
#ifdef CONFIG_CPU_BIG_ENDIAN
        /* 64-bit register storage: the low 32-bit word sits at offset +4 */
        if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
                mtc0_inst.i_format.simmediate |= 4;
#endif

        return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
}
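Similarly (illustrative only, not kernel source), an MTC0 whose rt register is $t0 ends up as "sw $t0, imm($zero)", with the immediate addressing that register's commpage slot. The 0x100 offset below is a made-up placeholder; the real value comes from KVM_GUEST_COMMPAGE_ADDR and offsetof() into struct kvm_mips_commpage:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t sw_opcode = 0x2b;      /* SW major opcode */
        uint32_t base = 0;              /* $zero */
        uint32_t rt = 8;                /* $t0, the original MTC0 source */
        uint32_t imm = 0x100;           /* hypothetical commpage slot offset */
        uint32_t word = (sw_opcode << 26) | (base << 21) | (rt << 16) | imm;

        printf("sw $t0, 0x%x($zero) = 0x%08x\n", imm, word);    /* 0xac080100 */
        return 0;
}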
Contributors
Person         | Tokens | Token share | Commits | Commit share |
James Hogan    |     83 |      61.48% |       7 |       77.78% |
Sanjay Lal     |     51 |      37.78% |       1 |       11.11% |
Deng-Cheng Zhu |      1 |       0.74% |       1 |       11.11% |
Total          |    135 |     100.00% |       9 |      100.00% |
Overall Contributors
Person         | Tokens | Token share | Commits | Commit share |
James Hogan    |    386 |      65.31% |      12 |       80.00% |
Sanjay Lal     |    198 |      33.50% |       1 |        6.67% |
Deng-Cheng Zhu |      7 |       1.18% |       2 |       13.33% |
Total          |    591 |     100.00% |      15 |      100.00% |