Release 4.11: arch/ia64/mm/hugetlbpage.c
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add NUMA support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);
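Every walker below first passes the user-visible address through htlbpage_to_page(). That macro is defined in arch/ia64/include/asm/page.h, not in this file; the idea is to keep the region number (the top three bits of the 64-bit address) and scale the region offset down by HPAGE_SIZE/PAGE_SIZE, so the sparse huge-page region can be walked with page tables sized for normal pages. A hedged sketch of that scaling, as an approximation rather than the exact macro:

	/*
	 * Approximation of htlbpage_to_page() (see asm/page.h for the
	 * real definition): preserve the region bits, compress the
	 * region offset down to base-page granularity.
	 */
	static inline unsigned long htlbpage_to_page_sketch(unsigned long addr)
	{
		return (REGION_NUMBER(addr) << 61) |
		       (REGION_OFFSET(addr) >> (HPAGE_SHIFT - PAGE_SHIFT));
	}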
pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Morton | 49 | 48.51% | 2 | 28.57% |
Nicholas Piggin | 22 | 21.78% | 1 | 14.29% |
David Mosberger-Tang | 12 | 11.88% | 1 | 14.29% |
Kenneth W. Chen | 12 | 11.88% | 1 | 14.29% |
Andi Kleen | 4 | 3.96% | 1 | 14.29% |
Rohit Seth | 2 | 1.98% | 1 | 14.29% |
Total | 101 | 100.00% | 7 | 100.00% |
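For context, a hedged sketch of how an allocation site such as the generic hugetlb fault path ends up using huge_pte_alloc(). The wrapper is hypothetical and the surrounding helpers (mk_huge_pte(), set_huge_pte_at()) are the generic-code names, shown purely as illustration rather than as part of this file:

	/* Illustration only (hypothetical helper): install a fresh huge
	 * page at 'addr'. Assumes mmap_sem is held and 'page' is a
	 * compound huge page. */
	static int install_huge_page(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long addr, struct page *page)
	{
		pte_t *ptep = huge_pte_alloc(mm, addr, HPAGE_SIZE);

		if (!ptep)
			return -ENOMEM;
		set_huge_pte_at(mm, addr, ptep,
				mk_huge_pte(page, vma->vm_page_prot));
		return 0;
	}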
pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}
	return pte;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Morton | 66 | 60.55% | 2 | 50.00% |
Nicholas Piggin | 24 | 22.02% | 1 | 25.00% |
Kenneth W. Chen | 19 | 17.43% | 1 | 25.00% |
Total | 109 | 100.00% | 4 | 100.00% |
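As a usage sketch, a hypothetical helper (not in the kernel) that resolves a huge-page address to its backing page frame number via huge_pte_offset(); follow_huge_addr() below does the same arithmetic with struct page:

	/* Hypothetical: return the pfn backing a huge-page address, or 0
	 * if unmapped. The caller must hold whatever locks keep the page
	 * tables from being freed underneath us. */
	static unsigned long huge_addr_to_pfn(struct mm_struct *mm,
					      unsigned long addr)
	{
		pte_t *ptep = huge_pte_offset(mm, addr);

		if (!ptep || pte_none(*ptep))
			return 0;
		return pte_pfn(*ptep) + ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	}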
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * Don't actually need to do any preparation, but need to make sure
 * the address is in the right region.
 */
int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Morton | 35 | 62.50% | 1 | 20.00% |
David Mosberger-Tang | 14 | 25.00% | 1 | 20.00% |
Andi Kleen | 5 | 8.93% | 1 | 20.00% |
David Gibson | 1 | 1.79% | 1 | 20.00% |
Peter Chubb | 1 | 1.79% | 1 | 20.00% |
Total | 56 | 100.00% | 5 | 100.00% |
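Concretely, assuming the conventional RGN_HPAGE value of 4 (so huge-page addresses carry 100 in their top three bits), an HPAGE_SIZE-aligned range inside that region passes and everything else is rejected; the addresses below are illustrative only:

	prepare_hugepage_range(NULL, 0x8000000010000000UL, HPAGE_SIZE); /* 0: aligned, region 4   */
	prepare_hugepage_range(NULL, 0x8000000010000000UL, PAGE_SIZE);  /* -EINVAL: len unaligned */
	prepare_hugepage_range(NULL, 0x2000000000000000UL, HPAGE_SIZE); /* -EINVAL: region 1      */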
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
David Gibson | 54 | 59.34% | 1 | 20.00% |
Andrew Morton | 21 | 23.08% | 1 | 20.00% |
David Mosberger-Tang | 14 | 15.38% | 1 | 20.00% |
Peter Chubb | 1 | 1.10% | 1 | 20.00% |
Christoph Lameter | 1 | 1.10% | 1 | 20.00% |
Total | 91 | 100.00% | 5 | 100.00% |
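The final pointer arithmetic works because a huge page is a physically contiguous compound page, so its constituent base pages sit consecutively in the struct page array. A worked example with assumed sizes (PAGE_SHIFT == 14, HPAGE_SHIFT == 28):

	/*
	 * addr = 0x8000000010008000
	 * addr & ~HPAGE_MASK    = 0x8000  (byte offset into the huge page)
	 * 0x8000 >> PAGE_SHIFT  = 2       (third base page of the compound)
	 * so follow_huge_addr() returns pte_page(*ptep) + 2.
	 */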
/*
 * ia64 maps huge pages with ordinary PTEs at the bottom level of the
 * scaled-down walk above, so a PMD or PUD entry is never itself a huge
 * mapping.
 */
int pmd_huge(pmd_t pmd)
{
	return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
David Gibson | 8 | 72.73% | 1 | 33.33% |
David Mosberger-Tang | 2 | 18.18% | 1 | 33.33% |
Andrew Morton | 1 | 9.09% | 1 | 33.33% |
Total | 11 | 100.00% | 3 | 100.00% |
int pud_huge(pud_t pud)
{
	return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andi Kleen | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */
	addr = htlbpage_to_page(addr);
	end = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
David Gibson | 34 | 39.53% | 1 | 20.00% |
David Mosberger-Tang | 24 | 27.91% | 1 | 20.00% |
Andrew Morton | 21 | 24.42% | 2 | 40.00% |
Kenneth W. Chen | 7 | 8.14% | 1 | 20.00% |
Total | 86 | 100.00% | 5 | 100.00% |
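To make the scaling concrete, a hedged numeric sketch assuming 16 KB base pages (PAGE_SHIFT == 14) and the default 256 MB huge pages (HPAGE_SHIFT == 28):

	/* One huge page of region offset collapses to one base page of
	 * offset in the scaled-down walk that free_pgd_range() sees. */
	unsigned long off        = 1UL << HPAGE_SHIFT;                /* 0x10000000 */
	unsigned long scaled_off = off >> (HPAGE_SHIFT - PAGE_SHIFT); /* 0x4000     */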
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_unmapped_area_info info;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;

	info.flags = 0;
	info.length = len;
	info.low_limit = addr;
	info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
	info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Morton | 58 | 37.18% | 1 | 14.29% |
Michel Lespinasse | 41 | 26.28% | 1 | 14.29% |
Benjamin Herrenschmidt | 25 | 16.03% | 1 | 14.29% |
David Mosberger-Tang | 20 | 12.82% | 1 | 14.29% |
Rohit Seth | 8 | 5.13% | 1 | 14.29% |
Andi Kleen | 2 | 1.28% | 1 | 14.29% |
Peter Chubb | 2 | 1.28% | 1 | 14.29% |
Total | 156 | 100.00% | 7 | 100.00% |
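From user space this handler is reached through mmap() on a hugetlbfs file or with MAP_HUGETLB; a minimal sketch, assuming the default 256 MB huge page size and a libc that exposes MAP_HUGETLB:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* With addr == NULL the handler above chooses a slot
		 * inside the ia64 huge-page region for us. */
		void *p = mmap(NULL, 256UL << 20, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
			       -1, 0);

		if (p == MAP_FAILED)
			perror("mmap");
		return 0;
	}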
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot CPU already ran ia64_mmu_init and programmed the
	 * huge-page region register with HPAGE_SHIFT_DEFAULT; override
	 * it here with the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Kenneth W. Chen | 104 | 95.41% | 1 | 33.33% |
Vignesh Babu | 4 | 3.67% | 1 | 33.33% |
Mel Gorman | 1 | 0.92% | 1 | 33.33% |
Total | 109 | 100.00% | 3 | 100.00% |
early_param("hugepagesz", hugetlb_setup_sz);
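The parameter arrives on the kernel command line, e.g. booting with hugepagesz=64M selects 64 MB huge pages. Decoding the fallback mask shows which sizes can ever pass the tr_pages test; the macro name below is illustrative only:

	/* Hypothetical name for the fallback value used above. Bits
	 * 12-14, 16, 18, 20, 22, 24, 26 and 28 are set, i.e. 4K, 8K,
	 * 16K, 64K, 256K, 1M, 4M, 16M, 64M and 256M -- the Itanium
	 * translation sizes. For example
	 * (ITANIUM_TR_SIZES & (64UL << 20)) != 0, so "hugepagesz=64M"
	 * passes the check. */
	#define ITANIUM_TR_SIZES	0x15557000UL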
Overall Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Morton | 264 | 33.29% | 6 | 26.09% |
Kenneth W. Chen | 156 | 19.67% | 3 | 13.04% |
David Mosberger-Tang | 106 | 13.37% | 1 | 4.35% |
David Gibson | 98 | 12.36% | 2 | 8.70% |
Nicholas Piggin | 46 | 5.80% | 1 | 4.35% |
Michel Lespinasse | 41 | 5.17% | 1 | 4.35% |
Benjamin Herrenschmidt | 25 | 3.15% | 1 | 4.35% |
Andi Kleen | 22 | 2.77% | 2 | 8.70% |
Rohit Seth | 12 | 1.51% | 1 | 4.35% |
Jack Steiner | 8 | 1.01% | 1 | 4.35% |
Vignesh Babu | 7 | 0.88% | 1 | 4.35% |
Peter Chubb | 4 | 0.50% | 1 | 4.35% |
Mel Gorman | 3 | 0.38% | 1 | 4.35% |
Christoph Lameter | 1 | 0.13% | 1 | 4.35% |
Total | 793 | 100.00% | 23 | 100.00% |