cregit-Linux: how code gets into the kernel

Release 4.14 arch/powerpc/kvm/book3s_hv_rm_mmu.c

Directory: arch/powerpc/kvm
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/log2.h>

#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/pte-walk.h>

/* Translate address of a vmalloc'd thing to a linear map address */

static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;
	/*
	 * Assume we don't have huge pages in vmalloc space,
	 * so don't worry about THP collapse/split. Called
	 * only in real mode with MSR_EE = 0, hence won't need irq_save/restore.
	 */
	p = find_init_mm_pte(addr, NULL);
	if (!p || !pte_present(*p))
		return NULL;
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

Contributors

Person             Tokens  Prop     Commits  Commit Prop
Paul Mackerras     71      94.67%   1        33.33%
Aneesh Kumar K.V   4       5.33%    2        66.67%
Total              75      100.00%  3        100.00%
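
To see the arithmetic real_vmalloc_addr() performs once the PTE is found, here is a minimal userspace sketch: it recombines a page frame number with the in-page offset of the original virtual address. PAGE_SHIFT, the address, and the pfn are made-up illustrative values, not the kernel's.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical constants for illustration only; powerpc kernels
 * typically use PAGE_SHIFT of 16 (64K pages) or 12 (4K pages). */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t vmalloc_addr = 0xc008000012345678ULL; /* made-up virtual address */
	uint64_t pfn = 0x1a2b3cULL;                    /* made-up page frame number */

	/* Same arithmetic as real_vmalloc_addr(): keep the in-page offset,
	 * swap in the physical page number found in the PTE. */
	uint64_t phys = (pfn << PAGE_SHIFT) | (vmalloc_addr & ~PAGE_MASK);

	printf("offset in page: 0x%llx\n",
	       (unsigned long long)(vmalloc_addr & ~PAGE_MASK));
	printf("recombined physical address: 0x%llx\n",
	       (unsigned long long)phys);
	return 0;
}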

/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm, unsigned long flags)
{
	int global;
	int cpu;

	/*
	 * If there is only one vcore, and it's currently running,
	 * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
		global = 0;
	else
		global = 1;

	if (!global) {
		/* any other core might now have stale TLB entries... */
		smp_wmb();
		cpumask_setall(&kvm->arch.need_tlb_flush);
		cpu = local_paca->kvm_hstate.kvm_vcore->pcpu;
		/*
		 * On POWER9, threads are independent but the TLB is shared,
		 * so use the bit for the first thread to represent the core.
		 */
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			cpu = cpu_first_thread_sibling(cpu);
		cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
	}

	return global;
}

Contributors

Person           Tokens  Prop     Commits  Commit Prop
Paul Mackerras   108     100.00%  4        100.00%
Total            108     100.00%  4        100.00%
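
The bookkeeping on the tlbiel path can be modelled in a few lines. This sketch stands in for the cpumask calls with a plain 64-bit mask, one hypothetical bit per physical core; the real code operates on kvm->arch.need_tlb_flush via cpumask_setall() and cpumask_clear_cpu().

#include <stdio.h>
#include <stdint.h>

/* Toy model of kvm->arch.need_tlb_flush: one bit per physical core. */
static uint64_t need_tlb_flush;

/* Mirrors the bookkeeping in global_invalidates(): when we take the
 * cheap local-only invalidation path, every *other* core must be
 * flagged as possibly holding stale translations for this lpid. */
static void mark_others_stale(int my_core)
{
	need_tlb_flush = ~0ULL;               /* cpumask_setall()    */
	need_tlb_flush &= ~(1ULL << my_core); /* cpumask_clear_cpu() */
}

int main(void)
{
	mark_others_stale(3);
	printf("need_tlb_flush = 0x%016llx\n",
	       (unsigned long long)need_tlb_flush);
	return 0;
}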

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.hpt.rev[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.hpt.rev[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
			pte_index | KVMPPC_RMAP_PRESENT;
	}
	unlock_rmap(rmap);
}

Contributors

Person           Tokens  Prop     Commits  Commit Prop
Paul Mackerras   159     96.36%   2        66.67%
David Gibson     6       3.64%    1        33.33%
Total            165     100.00%  3        100.00%
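
The revmap chain is a circular doubly-linked list threaded through an array by index rather than by pointer (forw/back hold HPTE indices). The following self-contained sketch reproduces the insertion step with a toy node type; chain_insert and the array sizes are invented for illustration.

#include <stdio.h>

/* Minimal stand-in for struct revmap_entry: a circular doubly-linked
 * list threaded through an array by index. */
struct node { unsigned forw, back; };

static struct node rev[8];

/* Insert 'idx' at the tail of the list whose head is 'head',
 * mirroring the pointer updates in kvmppc_add_revmap_chain(). */
static void chain_insert(unsigned head, unsigned idx)
{
	unsigned tail = rev[head].back;

	rev[idx].forw = head;
	rev[idx].back = tail;
	rev[tail].forw = idx;
	rev[head].back = idx;
}

int main(void)
{
	unsigned head = 0;

	rev[head].forw = rev[head].back = head; /* singleton list */
	chain_insert(head, 2);
	chain_insert(head, 5);

	for (unsigned i = head, n = 0; n < 3; i = rev[i].forw, ++n)
		printf("%u -> ", i);
	printf("(back to %u)\n", head);	/* prints: 0 -> 2 -> 5 -> (back to 0) */
	return 0;
}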

EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

/* Update the changed page order field of an rmap entry */
void kvmppc_update_rmap_change(unsigned long *rmap, unsigned long psize)
{
	unsigned long order;

	if (!psize)
		return;
	order = ilog2(psize);
	order <<= KVMPPC_RMAP_CHG_SHIFT;
	if (order > (*rmap & KVMPPC_RMAP_CHG_ORDER))
		*rmap = (*rmap & ~KVMPPC_RMAP_CHG_ORDER) | order;
}

Contributors

Person           Tokens  Prop     Commits  Commit Prop
Paul Mackerras   59      100.00%  1        100.00%
Total            59      100.00%  1        100.00%
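
The encode-and-grow logic is easy to demonstrate. In this sketch the shift and mask are hypothetical stand-ins for KVMPPC_RMAP_CHG_SHIFT and KVMPPC_RMAP_CHG_ORDER (the real values live in kernel headers), and ilog2() is emulated with the GCC/Clang __builtin_clzll builtin.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field layout for illustration only. */
#define CHG_SHIFT 48
#define CHG_ORDER (0x3fULL << CHG_SHIFT)

static unsigned ilog2_u64(uint64_t x)
{
	return 63 - __builtin_clzll(x);
}

/* Record the largest page order seen as "changed", as the kernel
 * routine does: only grow the stored order, never shrink it. */
static void update_chg_order(uint64_t *rmap, uint64_t psize)
{
	uint64_t order;

	if (!psize)
		return;
	order = (uint64_t)ilog2_u64(psize) << CHG_SHIFT;
	if (order > (*rmap & CHG_ORDER))
		*rmap = (*rmap & ~CHG_ORDER) | order;
}

int main(void)
{
	uint64_t rmap = 0;

	update_chg_order(&rmap, 1ULL << 12); /* 4K  */
	update_chg_order(&rmap, 1ULL << 24); /* 16M */
	update_chg_order(&rmap, 1ULL << 16); /* 64K: ignored, smaller */
	printf("stored order = %llu\n",      /* prints 24 */
	       (unsigned long long)((rmap & CHG_ORDER) >> CHG_SHIFT));
	return 0;
}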

EXPORT_SYMBOL_GPL(kvmppc_update_rmap_change);

/* Returns a pointer to the revmap entry for the page mapped by a HPTE */
static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v,
				      unsigned long hpte_gr)
{
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long gfn;

	gfn = hpte_rpn(hpte_gr, hpte_page_size(hpte_v, hpte_gr));
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (!memslot)
		return NULL;

	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	return rmap;
}

Contributors

Person           Tokens  Prop     Commits  Commit Prop
Paul Mackerras   91      100.00%  1        100.00%
Total            91      100.00%  1        100.00%

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long ptel, head;
	unsigned long *rmap;
	unsigned long rcbits;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	rmap = revmap_for_hpte(kvm, hpte_v, ptel);
	if (!rmap)
		return;
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	if (rcbits & HPTE_R_C)
		kvmppc_update_rmap_change(rmap, hpte_page_size(hpte_v, hpte_r));
	unlock_rmap(rmap);
}

Contributors

Person           Tokens  Prop     Commits  Commit Prop
Paul Mackerras   219     97.33%   4        80.00%
David Gibson     6       2.67%    1        20.00%
Total            225     100.00%  5        100.00%
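
The unlink step mirrors the insertion sketched earlier. Again a toy array-indexed circular list; chain_remove is an invented name, and the head-adjustment logic follows the same cases as remove_revmap_chain().

#include <stdio.h>

struct node { unsigned forw, back; };
static struct node rev[8];

/* Unlink 'idx' from the index-threaded circular list, mirroring the
 * pointer updates in remove_revmap_chain(). Returns the new head, or
 * 'idx' itself when the list has become empty. */
static unsigned chain_remove(unsigned head, unsigned idx)
{
	unsigned next = rev[idx].forw, prev = rev[idx].back;

	rev[next].back = prev;
	rev[prev].forw = next;
	if (head == idx)
		head = (next == idx) ? idx : next; /* empty if we were alone */
	return head;
}

int main(void)
{
	/* Hand-built three-entry circular list: 0 <-> 2 <-> 5. */
	rev[0] = (struct node){ .forw = 2, .back = 5 };
	rev[2] = (struct node){ .forw = 5, .back = 0 };
	rev[5] = (struct node){ .forw = 0, .back = 2 };

	unsigned head = chain_remove(0, 2);
	printf("head %u: %u -> %u -> (wraps)\n",
	       head, head, rev[head].forw);	/* prints: head 0: 0 -> 5 -> (wraps) */
	return 0;
}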


long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		       long pte_index, unsigned long pteh, unsigned long ptel,
		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
	unsigned hpage_shift;
	bool is_ci;
	unsigned long *rmap;
	pte_t *ptep;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits, irq_flags = 0;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~HPTE_GR_RESERVED;
	g_ptel = ptel;

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	pa = 0;
	is_ci = false;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];

	/* Translate to host virtual address */
	hva = __gfn_to_hva_memslot(memslot, gfn);
	/*
	 * If we had a page table change after lookup, we would
	 * retry via mmu_notifier_retry.
	 */
	if (!realmode)
		local_irq_save(irq_flags);
	/*
	 * If called in real mode we have MSR_EE = 0. Otherwise
	 * we disable irq above.
	 */
	ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
	if (ptep) {
		pte_t pte;
		unsigned int host_pte_size;

		if (hpage_shift)
			host_pte_size = 1ul << hpage_shift;
		else
			host_pte_size = PAGE_SIZE;
		/*
		 * We should always find the guest page size
		 * to be <= host page size, if the host is using hugepages.
		 */
		if (host_pte_size < psize) {
			if (!realmode)
				local_irq_restore(irq_flags);
			return H_PARAMETER;
		}
		pte = kvmppc_read_update_linux_pte(ptep, writing);
		if (pte_present(pte) && !pte_protnone(pte)) {
			if (writing && !__pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_ci = pte_ci(pte);
			pa = pte_pfn(pte) << PAGE_SHIFT;
			pa |= hva & (host_pte_size - 1);
			pa |= gpa & ~PAGE_MASK;
		}
	}
	if (!realmode)
		local_irq_restore(irq_flags);

	ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else {
		pteh |= HPTE_V_ABSENT;
		ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
	}

	/* If we had a host pte mapping then check WIMG */
	if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) {
		if (is_ci)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}

	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it. Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				u64 pte;
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				pte = be64_to_cpu(hpte[0]);
				if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				__unlock_hpte(hpte, pte);
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			u64 pte;

			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			pte = be64_to_cpu(hpte[0]);
			if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				__unlock_hpte(hpte, pte);
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.hpt.rev[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev) {
		rev->guest_rpte = g_ptel;
		note_hpte_modification(kvm, rev);
	}

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (mmu_notifier_retry(kvm, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	/* Convert to new format on P9 */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		ptel = hpte_old_to_new_r(pteh, ptel);
		pteh = hpte_old_to_new_v(pteh);
	}
	hpte[1] = cpu_to_be64(ptel);

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	__unlock_hpte(hpte, pteh);
	asm volatile("ptesync" : : : "memory");

	*pte_idx_ret = pte_index;
	return H_SUCCESS;
}

Contributors

Person             Tokens  Prop     Commits  Commit Prop
Paul Mackerras     811     79.74%   21       55.26%
Aneesh Kumar K.V   118     11.60%   7        18.42%
Alexander Graf     38      3.74%    1        2.63%
Yongji Xie         20      1.97%    1        2.63%
David Gibson       14      1.38%    2        5.26%
Ram Pai            6       0.59%    1        2.63%
Liu Ping Fan       5       0.49%    1        2.63%
Takuya Yoshikawa   2       0.20%    1        2.63%
Gavin Shan         1       0.10%    1        2.63%
Christoffer Dall   1       0.10%    1        2.63%
Mel Gorman         1       0.10%    1        2.63%
Total              1017    100.00%  38       100.00%

EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);
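
The first-pass slot search in kvmppc_do_h_enter() (scan the 8-entry HPTE group and claim the first slot that is neither valid, absent, nor locked) can be sketched with C11 atomics. The bit layout and helper names here are illustrative, not the real HPTE format; the kernel's try_lock_hpte() is a single ldarx/stdcx. attempt, which a single compare-and-swap approximates.

#include <stdatomic.h>
#include <stdio.h>
#include <stdint.h>

/* Hypothetical flag bits standing in for HPTE_V_HVLOCK / HPTE_V_VALID /
 * HPTE_V_ABSENT; not the real HPTE layout. */
#define V_LOCK   (1ULL << 62)
#define V_VALID  (1ULL << 63)
#define V_ABSENT (1ULL << 61)

static _Atomic uint64_t hpteg[8]; /* one 8-entry HPTE group, dword 0 only */

/* Single-shot try-lock in the spirit of try_lock_hpte(): succeed only if
 * none of 'bits' are currently set, atomically setting the lock bit. */
static int try_lock_slot(_Atomic uint64_t *slot, uint64_t bits)
{
	uint64_t old = atomic_load(slot);

	if (old & bits)
		return 0;
	return atomic_compare_exchange_strong(slot, &old, old | V_LOCK);
}

int main(void)
{
	/* Pre-populate slots 0..2 as valid, as if already in use. */
	for (int i = 0; i < 3; i++)
		atomic_store(&hpteg[i], V_VALID);

	/* First pass of the H_ENTER slot search: grab the first slot that
	 * is neither valid, absent, nor locked. */
	for (int i = 0; i < 8; i++) {
		if (try_lock_slot(&hpteg[i], V_LOCK | V_VALID | V_ABSENT)) {
			printf("claimed slot %d\n", i); /* prints: claimed slot 3 */
			break;
		}
	}
	return 0;
}
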
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}

Contributors

Person           Tokens  Prop     Commits  Commit Prop
Paul Mackerras   59      100.00%  1        100.00%
Total            59      100.00%  1        100.00%

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
static inline int is_mmio_hpte(unsigned long v, unsigned long r)
{
	return ((v & HPTE_V_ABSENT) &&
		(r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
		(HPTE_R_KEY_HI | HPTE_R_KEY_LO));
}

Contributors

Person       Tokens  Prop     Commits  Commit Prop
Yongji Xie   40      100.00%  1        100.00%
Total        40      100.00%  1        100.00%


static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}

Contributors

Person           Tokens  Prop     Commits  Commit Prop
Paul Mackerras   32      100.00%  1        100.00%
Total            32      100.00%  1        100.00%
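
A userspace analog of try_lock_tlbie(): the lwarx/stwcx. sequence amounts to one compare-and-swap of 0 to the caller's token. Here atomic_compare_exchange_strong stands in for the reservation pair, and LOCK_TOKEN (a per-CPU value identifying the lock holder) is replaced by an arbitrary nonzero value.

#include <stdatomic.h>
#include <stdio.h>

/* Single-shot try-lock: succeed only if the lock word is 0,
 * storing our token so the holder can be identified. */
static int try_lock_tlbie_like(atomic_uint *lock, unsigned token)
{
	unsigned expected = 0;

	return atomic_compare_exchange_strong(lock, &expected, token);
}

int main(void)
{
	atomic_uint tlbie_lock = 0;

	printf("first try:    %d\n", try_lock_tlbie_like(&tlbie_lock, 1));
	printf("second try:   %d\n", try_lock_tlbie_like(&tlbie_lock, 1));
	atomic_store(&tlbie_lock, 0); /* unlock, as do_tlbies() does */
	printf("after unlock: %d\n", try_lock_tlbie_like(&tlbie_lock, 1));
	return 0;
}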


static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
		      long npages, int global, bool need_sync)
{
	long i;

	/*
	 * We use the POWER9 5-operand versions of tlbie and tlbiel here.
	 * Since we are using RIC=0 PRS=0 R=0, and P7/P8 tlbiel ignores
	 * the RS field, this is backwards-compatible with P7 and P8.
	 */
	if (global) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
			trace_tlbie(kvm->arch.lpid, 0, rbvalues[i],
				    kvm->arch.lpid, 0, 0, 0);
		}
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
				     "r" (rbvalues[i]), "r" (0));
			trace_tlbie(kvm->arch.lpid, 1, rbvalues[i],
				    0, 0, 0, 0);
		}
		asm volatile("ptesync" : : : "memory");
	}
}

Contributors

Person           Tokens  Prop     Commits  Commit Prop
Paul Mackerras   114     67.06%   2        66.67%
Balbir Singh     56      32.94%   1        33.33%
Total            170     100.00%  3        100.00%


long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
{
	__be64 *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;
	u64 pte, orig_pte, pte_r;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	pte = orig_pte = be64_to_cpu(hpte[0]);
	pte_r = be64_to_cpu(hpte[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		pte = hpte_new_to_old_v(pte, pte_r);
		pte_r = hpte_new_to_old_r(pte_r);
	}
	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
		__unlock_hpte(hpte, orig_pte);
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	v = pte & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
		rb = compute_tlbie_rb(v, pte_r, pte_index);
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
		/*
		 * The reference (R) and change (C) bits in a HPT
		 * entry can be set by hardware at any time up until
		 * the HPTE is invalidated and the TLB invalidation
		 * sequence has completed. This means that when
		 * removing a HPTE, we need to re-read the HPTE after
		 * the invalidation sequence has completed in order to
		 * obtain reliable values of R and C.
		 */
		remove_revmap_chain(kvm, pte_index, rev, v,
				    be64_to_cpu(hpte[1]));
	}
	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
	note_hpte_modification(kvm, rev);
	unlock_hpte(hpte, 0);

	if (is_mmio_hpte(v, pte_r))
		atomic64_inc(&kvm->arch.mmio_update);

	if (v & HPTE_V_ABSENT)
		v = (v & ~HPTE_V_ABSENT) | HPTE_V_VALID;
	hpret[0] = v;
	hpret[1] = r;
	return H_SUCCESS;
}

Contributors

Person             Tokens  Prop     Commits  Commit Prop
Paul Mackerras     333     85.82%   13       72.22%
Alexander Graf     23      5.93%    1        5.56%
Yongji Xie         18      4.64%    1        5.56%
David Gibson       11      2.84%    2        11.11%
Aneesh Kumar K.V   3       0.77%    1        5.56%
Total              388     100.00%  18       100.00%

EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn)
{
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.gpr[4]);
}

Contributors

Person           Tokens  Prop     Commits  Commit Prop
Paul Mackerras   46      100.00%  1        100.00%
Total            46      100.00%  1        100.00%


long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	__be64 *hp, *hptes[4];
	unsigned long tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	int global;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];
	u64 hp0, hp1;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	global = global_invalidates(kvm, 0);
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {		/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 ||
			    pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (__be64 *) (kvm->arch.hpt.virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			hp0 = be64_to_cpu(hp[0]);
			hp1 = be64_to_cpu(hp[1]);
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				hp0 = hpte_new_to_old_v(hp0, hp1);
				hp1 = hpte_new_to_old_r(hp1);
			}
			if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0