cregit-Linux: how code gets into the kernel

Release 4.10 arch/sparc/mm/hugetlbpage.c

Directory: arch/sparc/mm
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */


static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long task_size = TASK_SIZE; struct vm_unmapped_area_info info; if (test_thread_flag(TIF_32BIT)) task_size = STACK_TOP32; info.flags = 0; info.length = len; info.low_limit = TASK_UNMAPPED_BASE; info.high_limit = min(task_size, VA_EXCLUDE_START); info.align_mask = PAGE_MASK & ~HPAGE_MASK; info.align_offset = 0; addr = vm_unmapped_area(&info); if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) { VM_BUG_ON(addr != -ENOMEM); info.low_limit = VA_EXCLUDE_END; info.high_limit = task_size; addr = vm_unmapped_area(&info); } return addr; }

Contributors

Person               Tokens  Prop     Commits  CommitProp
david s. miller      83      56.46%   1        50.00%
michel lespinasse    64      43.54%   1        50.00%
Total                147     100.00%  2        100.00%


static unsigned long hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { struct mm_struct *mm = current->mm; unsigned long addr = addr0; struct vm_unmapped_area_info info; /* This should only ever run for 32-bit processes. */ BUG_ON(!test_thread_flag(TIF_32BIT)); info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = PAGE_SIZE; info.high_limit = mm->mmap_base; info.align_mask = PAGE_MASK & ~HPAGE_MASK; info.align_offset = 0; addr = vm_unmapped_area(&info); /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. */ if (addr & ~PAGE_MASK) { VM_BUG_ON(addr != -ENOMEM); info.flags = 0; info.low_limit = TASK_UNMAPPED_BASE; info.high_limit = STACK_TOP32; addr = vm_unmapped_area(&info); } return addr; }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller9963.06%150.00%
michel lespinassemichel lespinasse5836.94%150.00%
Total157100.00%2100.00%


unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long task_size = TASK_SIZE; if (test_thread_flag(TIF_32BIT)) task_size = STACK_TOP32; if (len & ~HPAGE_MASK) return -EINVAL; if (len > task_size) return -ENOMEM; if (flags & MAP_FIXED) { if (prepare_hugepage_range(file, addr, len)) return -EINVAL; return addr; } if (addr) { addr = ALIGN(addr, HPAGE_SIZE); vma = find_vma(mm, addr); if (task_size - len >= addr && (!vma || addr + len <= vma->vm_start)) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) return hugetlb_get_unmapped_area_bottomup(file, addr, len, pgoff, flags); else return hugetlb_get_unmapped_area_topdown(file, addr, len, pgoff, flags); }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller16486.32%133.33%
benjamin herrenschmidtbenjamin herrenschmidt2412.63%133.33%
andi kleenandi kleen21.05%133.33%
Total190100.00%3100.00%


pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) { pgd_t *pgd; pud_t *pud; pte_t *pte = NULL; pgd = pgd_offset(mm, addr); pud = pud_alloc(mm, pgd, addr); if (pud) pte = (pte_t *)pmd_alloc(mm, pud, addr); return pte; }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller6688.00%466.67%
nitin guptanitin gupta56.67%116.67%
andi kleenandi kleen45.33%116.67%
Total75100.00%6100.00%


pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; pud_t *pud; pte_t *pte = NULL; pgd = pgd_offset(mm, addr); if (!pgd_none(*pgd)) { pud = pud_offset(pgd, addr); if (!pud_none(*pud)) pte = (pte_t *)pmd_offset(pud, addr); } return pte; }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller7893.98%480.00%
nitin guptanitin gupta56.02%120.00%
Total83100.00%5100.00%


/*
 * Install a huge PTE at addr, maintaining the per-mm count of present
 * huge PTEs and batching TLB flushes for both halves of the huge page.
 */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pte_t old;

	/* Count transitions from not-present to present. */
	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count++;

	addr &= HPAGE_MASK;
	old = *ptep;
	*ptep = entry;

	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
	maybe_tlb_batch_add(mm, addr, ptep, old, 0);
	maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, old, 0);
}

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller4247.73%450.00%
nitin guptanitin gupta3944.32%225.00%
david gibsondavid gibson66.82%112.50%
mike kravetzmike kravetz11.14%112.50%
Total88100.00%8100.00%


/*
 * Atomically (w.r.t. this mm's page tables) clear the huge PTE at addr
 * and return its previous value, maintaining the per-mm count of
 * present huge PTEs and batching TLB flushes for both halves of the
 * huge page.
 */
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t old = *ptep;

	if (pte_present(old))
		mm->context.hugetlb_pte_count--;

	addr &= HPAGE_MASK;
	*ptep = __pte(0UL);

	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
	maybe_tlb_batch_add(mm, addr, ptep, old, 0);
	maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, old, 0);

	return old;
}

Contributors

PersonTokensPropCommitsCommitProp
nitin guptanitin gupta2833.33%222.22%
david s. millerdavid s. miller2833.33%555.56%
david gibsondavid gibson2732.14%111.11%
mike kravetzmike kravetz11.19%111.11%
Total84100.00%9100.00%


/*
 * True if the pmd entry maps a huge page: non-none, and its
 * VALID/PMD_HUGE bit pattern differs from a plain valid entry.
 */
int pmd_huge(pmd_t pmd)
{
	unsigned long val = pmd_val(pmd);

	return !pmd_none(pmd) &&
	       (val & (_PAGE_VALID | _PAGE_PMD_HUGE)) != _PAGE_VALID;
}

Contributors

PersonTokensPropCommitsCommitProp
nitin guptanitin gupta2066.67%150.00%
david gibsondavid gibson1033.33%150.00%
Total30100.00%2100.00%


/* sparc64 never maps huge pages at the PUD level. */
int pud_huge(pud_t pud)
{
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
andi kleenandi kleen11100.00%1100.00%
Total11100.00%1100.00%


/*
 * Free the PTE page hanging off one pmd entry: clear the entry, hand
 * the page to the mmu_gather for deferred freeing, and drop the mm's
 * PTE-page count.
 */
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
{
	pgtable_t pgtab = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, pgtab, addr);
	atomic_long_dec(&tlb->mm->nr_ptes);
}

Contributors

PersonTokensPropCommitsCommitProp
nitin guptanitin gupta52100.00%1100.00%
Total52100.00%1100.00%


/*
 * Walk the pmd entries covering [addr, end): huge pmds are simply
 * cleared (no PTE page below them), normal pmds have their PTE page
 * freed.  Afterwards, tear down the PMD page itself — but only if the
 * whole PUD-aligned span it covers lies within [floor, ceiling), the
 * same guard logic as free_pmd_range() in mm/memory.c.
 */
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	unsigned long next;
	unsigned long start = addr;
	pmd_t *pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	/* Keep the PMD page if it extends below floor or above ceiling. */
	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

Contributors

PersonTokensPropCommitsCommitProp
nitin guptanitin gupta182100.00%1100.00%
Total182100.00%1100.00%


/*
 * Walk the pud entries covering [addr, end), recursing into the pmd
 * level for each populated entry.  Afterwards, free the PUD page
 * itself if its whole PGDIR-aligned span lies within [floor, ceiling)
 * — same guard logic as free_pud_range() in mm/memory.c.
 */
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	unsigned long next;
	unsigned long start = addr;
	pud_t *pud = pud_offset(pgd, addr);

	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	/* Keep the PUD page if it extends below floor or above ceiling. */
	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

Contributors

PersonTokensPropCommitsCommitProp
nitin guptanitin gupta166100.00%1100.00%
Total166100.00%1100.00%


/*
 * Top-level hugetlb page-table teardown: walk the pgd entries covering
 * [addr, end) and recurse into the pud level for each populated entry.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	unsigned long next;
	pgd_t *pgd = pgd_offset(tlb->mm, addr);

	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

Contributors

PersonTokensPropCommitsCommitProp
nitin guptanitin gupta94100.00%1100.00%
Total94100.00%1100.00%


Overall Contributors

Person                    Tokens  Prop     Commits  CommitProp
nitin gupta               594     42.52%   2        11.76%
david s. miller           592     42.38%   8        47.06%
michel lespinasse         122     8.73%    1        5.88%
david gibson              43      3.08%    1        5.88%
benjamin herrenschmidt    24      1.72%    1        5.88%
andi kleen                17      1.22%    2        11.76%
andrew morton             3       0.21%    1        5.88%
mike kravetz              2       0.14%    1        5.88%
Total                     1397    100.00%  17       100.00%
Directory: arch/sparc/mm
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.