cregit-Linux: how code gets into the kernel

Release 4.11: arch/sparc/mm/tlb.c

Directory: arch/sparc/mm
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);


void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

Contributors

Person                Tokens  Prop      Commits  CommitProp
David S. Miller          128   90.78%         6      85.71%
Peter Zijlstra            13    9.22%         1      14.29%
Total                    141  100.00%         7     100.00%


void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}

Contributors

Person                Tokens  Prop      Commits  CommitProp
David S. Miller           22   91.67%         1      50.00%
Christoph Lameter          2    8.33%         1      50.00%
Total                     24  100.00%         2     100.00%


void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

Contributors

Person                Tokens  Prop      Commits  CommitProp
David S. Miller           31   93.94%         1      50.00%
Christoph Lameter          2    6.06%         1      50.00%
Total                     33  100.00%         2     100.00%
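
The enter/leave pair above is the switch that turns batching on and off: generic mm code brackets a run of page-table updates with these hooks so that the per-CPU tlb_batch can defer flushes until the end of the run. A hypothetical caller sketch follows; unmap_range_example() and its loop body are illustrative, not part of this file (in the kernel proper the call typically arrives via the sparc64 set_pte_at() path rather than directly):

static void unmap_range_example(struct mm_struct *mm, unsigned long start,
				unsigned long end, pte_t *ptep)
{
	unsigned long addr;

	arch_enter_lazy_mmu_mode();		/* tb->active = 1: start batching */
	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++) {
		pte_t old = *ptep;

		pte_clear(mm, addr, ptep);
		/* queues addr in tb->vaddrs[] instead of flushing now */
		tlb_batch_add(mm, addr, ptep, old, 0, PAGE_SHIFT);
	}
	arch_leave_lazy_mmu_mode();		/* drains via flush_tlb_pending() */
}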


static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, unsigned int hugepage_shift)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, hugepage_shift);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->hugepage_shift = hugepage_shift;
	}

	if (tb->hugepage_shift != hugepage_shift) {
		flush_tlb_pending();
		tb->hugepage_shift = hugepage_shift;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

Contributors

Person                Tokens  Prop      Commits  CommitProp
David S. Miller          139   76.37%         6      60.00%
Nitin Gupta               37   20.33%         2      20.00%
Peter Zijlstra             4    2.20%         1      10.00%
Dave Kleikamp              2    1.10%         1      10.00%
Total                    182  100.00%        10     100.00%
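
One detail worth noting in tlb_batch_add_one() is the pair of statements at the top: queued addresses are page aligned, so bit 0 is free and is reused to record whether the mapping was executable. A standalone user-space sketch of that encoding, assuming sparc64's 8KB base pages (the PAGE_SHIFT of 13 here is an assumption, not read from this file):

#include <stdio.h>

#define PAGE_SHIFT	13
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

static unsigned long encode_vaddr(unsigned long vaddr, int exec)
{
	vaddr &= PAGE_MASK;		/* drop the page offset */
	if (exec)
		vaddr |= 0x1UL;		/* stash the exec bit in bit 0 */
	return vaddr;
}

int main(void)
{
	unsigned long v = encode_vaddr(0x2000abcUL, 1);

	/* prints "addr=0x2000000 exec=1" */
	printf("addr=%#lx exec=%lu\n", v & PAGE_MASK, v & 0x1UL);
	return 0;
}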


void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}

Contributors

Person                Tokens  Prop      Commits  CommitProp
David S. Miller          148   93.08%         3      42.86%
Nitin Gupta                6    3.77%         2      28.57%
Peter Zijlstra             4    2.52%         1      14.29%
Hugh Dickins               1    0.63%         1      14.29%
Total                    159  100.00%         7     100.00%
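
The test (paddr ^ vaddr) & (1 << 13) above decides whether a dirty page needs a D-cache flush before its user translation goes away. The UltraSPARC D-cache is virtually indexed, and with 8KB pages bit 13 is the lowest address bit above the page offset, so it selects the cache color; when the kernel linear mapping and the user mapping disagree in that bit, the same data can sit in two different cache lines. A user-space sketch of the same check (illustrative only, not kernel code):

#include <stdio.h>

/* Returns nonzero when the two mappings land in different D-cache colors. */
static int dcache_color_mismatch(unsigned long paddr, unsigned long vaddr)
{
	return ((paddr ^ vaddr) & (1UL << 13)) != 0;
}

int main(void)
{
	/* Same color: no flush needed.  Prints 0. */
	printf("%d\n", dcache_color_mismatch(0x10000000UL, 0x00004000UL));
	/* Bit 13 differs: flush_dcache_page_all() would run.  Prints 1. */
	printf("%d\n", dcache_color_mismatch(0x10002000UL, 0x00004000UL));
	return 0;
}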

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

Contributors

Person                Tokens  Prop      Commits  CommitProp
David S. Miller           92   97.87%         3      60.00%
Nitin Gupta                2    2.13%         2      40.00%
Total                     94  100.00%         5     100.00%


void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_page(pmd_page(pmd)))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_page(pmd_page(orig)))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  REAL_HPAGE_SHIFT);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}

Contributors

Person                Tokens  Prop      Commits  CommitProp
David S. Miller          155   76.73%         6      60.00%
Mike Kravetz              43   21.29%         2      20.00%
Nitin Gupta                4    1.98%         2      20.00%
Total                    202  100.00%        10     100.00%
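
The two back-to-back tlb_batch_add_one() calls in set_pmd_at() reflect how sparc64 backs a THP: one huge page of HPAGE_SIZE is implemented as two TLB/TSB entries of REAL_HPAGE_SIZE, so both halves must be queued for demap. A small sketch of the address math, assuming the conventional sparc64 values of an 8MB huge page built from two 4MB real huge pages (the shifts below are assumptions, not read from this file):

#include <stdio.h>

#define REAL_HPAGE_SHIFT	22
#define REAL_HPAGE_SIZE		(1UL << REAL_HPAGE_SHIFT)	/* 4MB */
#define HPAGE_SHIFT		23				/* 8MB */
#define HPAGE_MASK		(~((1UL << HPAGE_SHIFT) - 1))

int main(void)
{
	unsigned long addr = 0x700000123456UL & HPAGE_MASK;

	/* The two demap targets queued for a single huge PMD: */
	printf("first  half: %#lx\n", addr);			/* 0x700000000000 */
	printf("second half: %#lx\n", addr + REAL_HPAGE_SIZE);	/* 0x700000400000 */
	return 0;
}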

/*
 * This routine is only called when splitting a THP
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;

	pmd_val(entry) &= ~_PAGE_VALID;

	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_page(pmd_page(entry)))
		(vma->vm_mm)->context.thp_pte_count--;
}

Contributors

Person                Tokens  Prop      Commits  CommitProp
David S. Miller           56   63.64%         1      50.00%
Mike Kravetz              32   36.36%         1      50.00%
Total                     88  100.00%         2     100.00%


void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

Contributors

Person                Tokens  Prop      Commits  CommitProp
David S. Miller           61   76.25%         2      50.00%
Kirill A. Shutemov        15   18.75%         1      25.00%
Aneesh Kumar K.V           4    5.00%         1      25.00%
Total                     80  100.00%         4     100.00%


pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}

Contributors

Person                Tokens  Prop      Commits  CommitProp
David S. Miller           88   80.00%         2      40.00%
Kirill A. Shutemov        15   13.64%         1      20.00%
Aneesh Kumar K.V           4    3.64%         1      20.00%
Peter Zijlstra             3    2.73%         1      20.00%
Total                    110  100.00%         5     100.00%

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

Overall Contributors

Person                Tokens  Prop      Commits  CommitProp
David S. Miller          967   83.08%        16      59.26%
Mike Kravetz              76    6.53%         2       7.41%
Nitin Gupta               49    4.21%         3      11.11%
Kirill A. Shutemov        30    2.58%         1       3.70%
Peter Zijlstra            27    2.32%         1       3.70%
Aneesh Kumar K.V           8    0.69%         1       3.70%
Christoph Lameter          4    0.34%         1       3.70%
Dave Kleikamp              2    0.17%         1       3.70%
Hugh Dickins               1    0.09%         1       3.70%
Total                   1164  100.00%        27     100.00%
Created with cregit.