Release 4.14 arch/mn10300/mm/pgtable.c
/* MN10300 Page table management
 *
 * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Modified by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/quicklist.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame.  pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated.  Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
                printk(KERN_ERR "set_pmd_pfn: vaddr misaligned\n");
                return; /* BUG(); */
        }
        if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
                printk(KERN_ERR "set_pmd_pfn: pfn misaligned\n");
                return; /* BUG(); */
        }
        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR "set_pmd_pfn: pgd_none\n");
                return; /* BUG(); */
        }
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        set_pmd(pmd, pfn_pmd(pfn, flags));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        local_flush_tlb_one(vaddr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Howells | 133 | 100.00% | 2 | 100.00% |
Total | 133 | 100.00% | 2 | 100.00% |
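The alignment contract is easiest to see from the calling side. A minimal hypothetical caller (the function name and the address/pfn values below are made up for illustration, not taken from the kernel):
static void map_large_page_example(void)
{
        unsigned long vaddr = 0x90000000UL;     /* must be PMD_SIZE-aligned */
        unsigned long pfn = 0x40000UL;          /* must be PTRS_PER_PTE-aligned */

        /* set_pmd_pfn() rejects either value with a printk if misaligned;
         * PAGE_KERNEL gives normal kernel protections. */
        set_pmd_pfn(vaddr, pfn, PAGE_KERNEL);
}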
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);

        if (pte)
                clear_page(pte);
        return pte;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Howells | 40 | 100.00% | 1 | 100.00% |
Total | 40 | 100.00% | 1 | 100.00% |
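A design note: the allocate-then-clear pair above can be collapsed into one step with __GFP_ZERO. A sketch of the equivalent form (the _alt name is hypothetical):
pte_t *pte_alloc_one_kernel_alt(struct mm_struct *mm, unsigned long address)
{
        /* __GFP_ZERO hands back an already-zeroed page, folding the
         * explicit clear_page() into the allocation itself. */
        return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}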
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

#ifdef CONFIG_HIGHPTE
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
#else
        pte = alloc_pages(GFP_KERNEL, 0);
#endif
        if (!pte)
                return NULL;
        clear_highpage(pte);

        /* pgtable_page_ctor() sets up the split page-table lock and the
         * page-table accounting; if that fails, give the page back. */
        if (!pgtable_page_ctor(pte)) {
                __free_page(pte);
                return NULL;
        }
        return pte;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Howells | 60 | 73.17% | 1 | 50.00% |
Kirill A. Shutemov | 22 | 26.83% | 1 | 50.00% |
Total | 82 | 100.00% | 2 | 100.00% |
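For symmetry, whatever frees these pages must undo pgtable_page_ctor() first. On this architecture the real free helpers live in asm/pgalloc.h, not in this file; the sketch below only shows the shape such a free path typically takes:
static inline void pte_free_sketch(struct mm_struct *mm, struct page *pte)
{
        pgtable_page_dtor(pte);         /* undo the ctor: ptlock and accounting */
        __free_page(pte);
}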
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * If the locking proves to be non-performant, a ticketing scheme with
 * checks at dup_mmap(), exec(), and other mmlist addition points
 * could be used. The locking scheme was chosen on the basis of
 * manfred's recommendations and having no core impact whatsoever.
 * -- nyc
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        /* The list is hand-rolled through struct page: page->index holds
         * the next page, and page_private() points back at the previous
         * element's next-link (or at pgd_list itself for the head). */
        page->index = (unsigned long) pgd_list;
        if (pgd_list)
                set_page_private(pgd_list, (unsigned long) &page->index);
        pgd_list = page;
        set_page_private(page, (unsigned long) &pgd_list);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Howells | 65 | 100.00% | 1 | 100.00% |
Total | 65 | 100.00% | 1 | 100.00% |
static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *next, **pprev, *page = virt_to_page(pgd);

        /* Unlink by steering the previous element's next-link past this
         * page, then fix up the back-pointer of the next page, if any. */
        next = (struct page *) page->index;
        pprev = (struct page **) page_private(page);
        *pprev = next;
        if (next)
                set_page_private(next, (unsigned long) pprev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Howells | 72 | 100.00% | 1 | 100.00% |
Total | 72 | 100.00% | 1 | 100.00% |
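The point of keeping this list is that pageattr-style code can visit every cached pgd when a kernel mapping changes. A hypothetical walker (for_each_pgd_sketch is not a real kernel helper) showing how the next-links are followed under pgd_lock:
static void for_each_pgd_sketch(void (*fn)(pgd_t *))
{
        unsigned long flags;
        struct page *page;

        /* Hold pgd_lock for the whole walk so entries cannot be added
         * or removed underneath us; next-links live in page->index. */
        spin_lock_irqsave(&pgd_lock, flags);
        for (page = pgd_list; page; page = (struct page *)page->index)
                fn((pgd_t *)page_address(page));
        spin_unlock_irqrestore(&pgd_lock, flags);
}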
void pgd_ctor(void *pgd)
{
        unsigned long flags;

        /* With a folded pmd (PTRS_PER_PMD == 1) the new pgd goes onto
         * pgd_list, so both the list update and the copy of the kernel
         * entries must happen under pgd_lock. */
        if (PTRS_PER_PMD == 1)
                spin_lock_irqsave(&pgd_lock, flags);

        /* Share the kernel portion of swapper_pg_dir with the new pgd */
        memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
               swapper_pg_dir + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

        /* With a shared kernel pmd there is no list, and no lock was taken */
        if (PTRS_PER_PMD > 1)
                return;

        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
        memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); /* clear user entries */
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Howells | 87 | 100.00% | 1 | 100.00% |
Total | 87 | 100.00% | 1 | 100.00% |
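The invariant pgd_ctor() establishes is that the kernel half of every constructed pgd mirrors swapper_pg_dir. A hypothetical check of that invariant (pgd_kernel_half_matches is not a real kernel function, just a sketch):
static bool pgd_kernel_half_matches(pgd_t *pgd)
{
        /* Compare only the kernel entries; the user half is per-mm. */
        return !memcmp(pgd + USER_PTRS_PER_PGD,
                       swapper_pg_dir + USER_PTRS_PER_PGD,
                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}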
/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd)
{
        unsigned long flags; /* can be called from interrupt context */

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Howells | 35 | 100.00% | 1 | 100.00% |
Total | 35 | 100.00% | 1 | 100.00% |
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Howells | 21 | 100.00% | 1 | 100.00% |
Total | 21 | 100.00% | 1 | 100.00% |
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        quicklist_free(0, pgd_dtor, pgd);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Howells | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
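Taken together, the two helpers give pgds a quicklist-backed lifecycle: quicklist_alloc() runs pgd_ctor only on pages it has to take fresh from the page allocator, so recycled pgds keep their constructed state while they sit in the per-CPU cache. A hypothetical sketch of the pairing as mm setup and teardown would use it (error handling trimmed to the essentials):
static int mm_pgd_lifecycle_sketch(struct mm_struct *mm)
{
        mm->pgd = pgd_alloc(mm);        /* ctor runs only for fresh pages */
        if (!mm->pgd)
                return -ENOMEM;

        /* ... the mm lives; user page tables come and go ... */

        pgd_free(mm, mm->pgd);          /* back onto the per-CPU quicklist */
        return 0;
}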
void __init pgtable_cache_init(void)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Howells | 7 | 100.00% | 1 | 100.00% |
Total | 7 | 100.00% | 1 | 100.00% |
void check_pgt_cache(void)
{
        /* Trim the pgd quicklist back towards 25 cached pages, releasing
         * at most 16 pages per call; pgd_dtor unlinks each released pgd
         * from pgd_list before the page goes back to the allocator. */
        quicklist_trim(0, pgd_dtor, 25, 16);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Howells | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Howells | 614 | 95.64% | 2 | 40.00% |
Kirill A. Shutemov | 22 | 3.43% | 1 | 20.00% |
Tejun Heo | 5 | 0.78% | 1 | 20.00% |
Nadia Yvette Chambers | 1 | 0.16% | 1 | 20.00% |
Total | 642 | 100.00% | 5 | 100.00% |