cregit-Linux how code gets into the kernel

Release 4.11 drivers/gpu/drm/i915/gvt/mmio.c

/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Dexuan Cui
 *
 * Contributors:
 *    Tina Zhang <tina.zhang@intel.com>
 *    Min He <min.he@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

/**
 * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
 * @vgpu: a vGPU
 * @gpa: guest physical address
 *
 * Returns:
 * The MMIO offset of the given GPA, i.e. the GPA minus the base of the
 * vGPU's GTTMMIO BAR.
 */
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
        u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
                          ~GENMASK(3, 0);
        return gpa - gttmmio_gpa;
}

Contributors

Person       Tokens     Prop  Commits  CommitProp
Zhi Wang         43  100.00%        1     100.00%
Total            43  100.00%        1     100.00%
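
The translation above subtracts the base of the vGPU's GTTMMIO BAR from the guest physical address; the base is read from BAR0 in the vGPU's virtual PCI configuration space, with the low four flag bits (GENMASK(3, 0)) masked off. A minimal standalone sketch of the same arithmetic follows; the BAR and GPA values are hypothetical, chosen only to illustrate the masking and subtraction.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical example values, not taken from real hardware. */
        uint64_t bar0 = 0xf0000004;             /* BAR0 with flag bits set */
        uint64_t gttmmio_gpa = bar0 & ~0xfULL;  /* mask the GENMASK(3, 0) flag bits */
        uint64_t gpa = 0xf0002000;              /* guest physical address */

        printf("MMIO offset = 0x%llx\n",
               (unsigned long long)(gpa - gttmmio_gpa));  /* prints 0x2000 */
        return 0;
}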

#define reg_is_mmio(gvt, reg)  \
        (reg >= 0 && reg < gvt->device_info.mmio_size)

#define reg_is_gtt(gvt, reg)   \
        (reg >= gvt->device_info.gtt_start_offset \
         && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))

static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
                void *p_data, unsigned int bytes, bool read)
{
        struct intel_gvt *gvt = NULL;
        void *pt = NULL;
        unsigned int offset = 0;

        if (!vgpu || !p_data)
                return;

        gvt = vgpu->gvt;
        mutex_lock(&gvt->lock);
        offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
        if (reg_is_mmio(gvt, offset)) {
                if (read)
                        intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
                else
                        intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
        } else if (reg_is_gtt(gvt, offset) &&
                        vgpu->gtt.ggtt_mm->virtual_page_table) {
                offset -= gvt->device_info.gtt_start_offset;
                pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;

                if (read)
                        memcpy(p_data, pt, bytes);
                else
                        memcpy(pt, p_data, bytes);

        } else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
                struct intel_vgpu_guest_page *gp;

                /* Since we enter the failsafe mode early during guest boot,
                 * the guest may not have had a chance to set up its ppgtt table, so
                 * there should not be any wp pages for the guest. Keep the wp
                 * related code here in case we need to handle it in the future.
                 */
                gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
                if (gp) {
                        /* remove write protection to prevent future traps */
                        intel_vgpu_clean_guest_page(vgpu, gp);
                        if (read)
                                intel_gvt_hypervisor_read_gpa(vgpu, pa,
                                                p_data, bytes);
                        else
                                intel_gvt_hypervisor_write_gpa(vgpu, pa,
                                                p_data, bytes);
                }
        }
        mutex_unlock(&gvt->lock);
}

Contributors

Person       Tokens     Prop  Commits  CommitProp
Min He          258  100.00%        1     100.00%
Total           258  100.00%        1     100.00%

/**
 * intel_vgpu_emulate_mmio_read - emulate MMIO read
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                void *p_data, unsigned int bytes)
{
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_mmio_info *mmio;
        unsigned int offset = 0;
        int ret = -EINVAL;

        if (vgpu->failsafe) {
                failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
                return 0;
        }
        mutex_lock(&gvt->lock);

        if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
                struct intel_vgpu_guest_page *gp;

                gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
                if (gp) {
                        ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
                                        p_data, bytes);
                        if (ret) {
                                gvt_vgpu_err("guest page read error %d, "
                                        "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
                                        ret, gp->gfn, pa, *(u32 *)p_data,
                                        bytes);
                        }
                        mutex_unlock(&gvt->lock);
                        return ret;
                }
        }

        offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

        if (WARN_ON(bytes > 8))
                goto err;

        if (reg_is_gtt(gvt, offset)) {
                if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
                        goto err;
                if (WARN_ON(bytes != 4 && bytes != 8))
                        goto err;
                if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
                        goto err;

                ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
                                p_data, bytes);
                if (ret)
                        goto err;
                mutex_unlock(&gvt->lock);
                return ret;
        }

        if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
                ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
                mutex_unlock(&gvt->lock);
                return ret;
        }

        if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
                goto err;

        if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
                if (WARN_ON(!IS_ALIGNED(offset, bytes)))
                        goto err;
        }

        mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
        if (mmio) {
                if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
                        if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
                                goto err;
                        if (WARN_ON(mmio->offset != offset))
                                goto err;
                }
                ret = mmio->read(vgpu, offset, p_data, bytes);
        } else {
                ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);

                if (!vgpu->mmio.disable_warn_untrack) {
                        gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
                                offset, bytes, *(u32 *)p_data);

                        if (offset == 0x206c) {
                                gvt_vgpu_err("------------------------------------------\n");
                                gvt_vgpu_err("likely triggers a gfx reset\n");
                                gvt_vgpu_err("------------------------------------------\n");
                                vgpu->mmio.disable_warn_untrack = true;
                        }
                }
        }

        if (ret)
                goto err;

        intel_gvt_mmio_set_accessed(gvt, offset);
        mutex_unlock(&gvt->lock);
        return 0;
err:
        gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
                        offset, bytes);
        mutex_unlock(&gvt->lock);
        return ret;
}

Contributors

Person       Tokens     Prop  Commits  CommitProp
Zhi Wang        477   81.96%        1      20.00%
Pei Zhang        68   11.68%        1      20.00%
Min He           24    4.12%        1      20.00%
Tina Zhang       10    1.72%        1      20.00%
Jike Song         3    0.52%        1      20.00%
Total           582  100.00%        5     100.00%
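
For context, this function is called with the faulting guest physical address and a destination buffer when a trapped MMIO read has to be emulated. The sketch below shows how such a caller might look; handle_mmio_read_trap is a hypothetical name used only for illustration and is not an interface defined in this file.

/* Hypothetical caller sketch: forward a trapped MMIO read to the
 * emulation path and report failure if emulation returns an error. */
static int handle_mmio_read_trap(struct intel_vgpu *vgpu, u64 gpa,
                                 void *buf, unsigned int len)
{
        int ret = intel_vgpu_emulate_mmio_read(vgpu, gpa, buf, len);

        if (ret)
                pr_err("MMIO read emulation failed at 0x%llx\n", gpa);
        return ret;
}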

/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
                void *p_data, unsigned int bytes)
{
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_mmio_info *mmio;
        unsigned int offset = 0;
        u32 old_vreg = 0, old_sreg = 0;
        int ret = -EINVAL;

        if (vgpu->failsafe) {
                failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
                return 0;
        }

        mutex_lock(&gvt->lock);

        if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
                struct intel_vgpu_guest_page *gp;

                gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
                if (gp) {
                        ret = gp->handler(gp, pa, p_data, bytes);
                        if (ret) {
                                gvt_err("guest page write error %d, "
                                        "gfn 0x%lx, pa 0x%llx, "
                                        "var 0x%x, len %d\n",
                                        ret, gp->gfn, pa,
                                        *(u32 *)p_data, bytes);
                        }
                        mutex_unlock(&gvt->lock);
                        return ret;
                }
        }

        offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

        if (WARN_ON(bytes > 8))
                goto err;

        if (reg_is_gtt(gvt, offset)) {
                if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
                        goto err;
                if (WARN_ON(bytes != 4 && bytes != 8))
                        goto err;
                if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
                        goto err;

                ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
                                p_data, bytes);
                if (ret)
                        goto err;
                mutex_unlock(&gvt->lock);
                return ret;
        }

        if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
                ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
                mutex_unlock(&gvt->lock);
                return ret;
        }

        mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
        if (!mmio && !vgpu->mmio.disable_warn_untrack)
                gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n",
                                vgpu->id, offset, bytes, *(u32 *)p_data);

        if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
                if (WARN_ON(!IS_ALIGNED(offset, bytes)))
                        goto err;
        }

        if (mmio) {
                u64 ro_mask = mmio->ro_mask;

                if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
                        if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
                                goto err;
                        if (WARN_ON(mmio->offset != offset))
                                goto err;
                }

                if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
                        old_vreg = vgpu_vreg(vgpu, offset);
                        old_sreg = vgpu_sreg(vgpu, offset);
                }

                if (!ro_mask) {
                        ret = mmio->write(vgpu, offset, p_data, bytes);
                } else {
                        /* Protect RO bits like HW */
                        u64 data = 0;

                        /* all register bits are RO. */
                        if (ro_mask == ~(u64)0) {
                                gvt_vgpu_err("try to write RO reg %x\n", offset);
                                ret = 0;
                                goto out;
                        }
                        /* keep the RO bits in the virtual register */
                        memcpy(&data, p_data, bytes);
                        data &= ~mmio->ro_mask;
                        data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
                        ret = mmio->write(vgpu, offset, &data, bytes);
                }

                /* higher 16 bits of mode ctl regs are mask bits for change */
                if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
                        u32 mask = vgpu_vreg(vgpu, offset) >> 16;

                        vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
                                (vgpu_vreg(vgpu, offset) & mask);
                        vgpu_sreg(vgpu, offset) = (old_sreg & ~mask) |
                                (vgpu_sreg(vgpu, offset) & mask);
                }
        } else
                ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
                                bytes);
        if (ret)
                goto err;
out:
        intel_gvt_mmio_set_accessed(gvt, offset);
        mutex_unlock(&gvt->lock);
        return 0;
err:
        gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n",
                        offset, bytes);
        mutex_unlock(&gvt->lock);
        return ret;
}

Contributors

Person       Tokens     Prop  Commits  CommitProp
Zhi Wang        717   95.35%        1      20.00%
Min He           24    3.19%        1      20.00%
Tina Zhang        7    0.93%        1      20.00%
Jike Song         3    0.40%        1      20.00%
Xinda Zhao        1    0.13%        1      20.00%
Total           752  100.00%        5     100.00%
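
Two bit tricks in the write path above are easy to miss: read-only bits are preserved by merging the guest's data with the current virtual register under ro_mask, and "mode control" registers carry a change mask in their upper 16 bits, so only the low bits whose mask bit is set take the new value. The standalone sketch below illustrates both merges with made-up values; it simplifies the sequencing (the real code applies the mode mask to the value already written into the virtual register).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Illustrative values only. */
        uint64_t ro_mask = 0x00000000000000ffULL;  /* low byte is read-only     */
        uint64_t vreg    = 0x00000000000000abULL;  /* current virtual register  */
        uint64_t guest   = 0x000000000001ffffULL;  /* value the guest writes    */

        /* Keep RO bits from the virtual register, take the rest from the guest. */
        uint64_t data = (guest & ~ro_mask) | (vreg & ro_mask);
        printf("merged write: 0x%llx\n", (unsigned long long)data);  /* 0x1ffab */

        /* Mode-control register: upper 16 bits select which low bits change. */
        uint32_t old_val = 0x00001234;
        uint32_t new_val = 0x000f0f0f;            /* mask 0x000f, new bits 0x0f0f */
        uint32_t mask = new_val >> 16;            /* 0x000f */
        uint32_t result = (old_val & ~mask) | (new_val & mask);
        printf("mode reg: 0x%x\n", result);       /* 0x123f */
        return 0;
}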

/**
 * intel_vgpu_reset_mmio - reset virtual MMIO space
 * @vgpu: a vGPU
 */
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
{
        struct intel_gvt *gvt = vgpu->gvt;
        const struct intel_gvt_device_info *info = &gvt->device_info;

        memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
        memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);

        vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

        /* set bits 0:2 (Core C-State) to C0 */
        vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;

        vgpu->mmio.disable_warn_untrack = false;
}

Contributors

Person       Tokens     Prop  Commits  CommitProp
Changbin Du      87   91.58%        2      66.67%
Xinda Zhao        8    8.42%        1      33.33%
Total            95  100.00%        3     100.00%

/**
 * intel_vgpu_init_mmio - init MMIO space
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
        const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;

        vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
        if (!vgpu->mmio.vreg)
                return -ENOMEM;

        vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

        intel_vgpu_reset_mmio(vgpu);

        return 0;
}

Contributors

Person       Tokens     Prop  Commits  CommitProp
Changbin Du      75  100.00%        2     100.00%
Total            75  100.00%        2     100.00%
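
The init path allocates a single zeroed region of twice mmio_size and carves it into vreg (the guest-visible virtual registers) followed immediately by sreg (the shadow copy); both halves are then seeded from the firmware MMIO snapshot by intel_vgpu_reset_mmio. A user-space sketch of the same layout, with a hypothetical size constant:

#include <stdlib.h>

#define MMIO_SIZE (2 * 1024 * 1024)     /* hypothetical size, for illustration */

int main(void)
{
        /* One zeroed allocation holding vreg followed by sreg,
         * mirroring vzalloc(info->mmio_size * 2) in the kernel code. */
        unsigned char *vreg = calloc(2, MMIO_SIZE);
        unsigned char *sreg;

        if (!vreg)
                return 1;
        sreg = vreg + MMIO_SIZE;        /* shadow copy starts halfway in */

        /* In the kernel, both halves are then filled from the firmware
         * MMIO snapshot by intel_vgpu_reset_mmio(). */
        (void)sreg;
        free(vreg);
        return 0;
}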

/**
 * intel_vgpu_clean_mmio - clean MMIO space
 * @vgpu: a vGPU
 */
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
        vfree(vgpu->mmio.vreg);
        vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}

Contributors

Person       Tokens     Prop  Commits  CommitProp
Changbin Du      33  100.00%        1     100.00%
Total            33  100.00%        1     100.00%


Overall Contributors

Person       Tokens     Prop  Commits  CommitProp
Zhi Wang       1264   67.56%        1      10.00%
Min He          306   16.35%        1      10.00%
Changbin Du     198   10.58%        2      20.00%
Pei Zhang        68    3.63%        1      10.00%
Tina Zhang       17    0.91%        1      10.00%
Xinda Zhao        9    0.48%        2      20.00%
Jike Song         6    0.32%        1      10.00%
Zhenyu Wang       3    0.16%        1      10.00%
Total          1871  100.00%       10     100.00%