Contributors: 9

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---:|---:|---:|---:|
| Richard Henderson | 77 | 35.32% | 2 | 16.67% |
| Linus Torvalds (pre-git) | 71 | 32.57% | 2 | 16.67% |
| Linus Torvalds | 34 | 15.60% | 1 | 8.33% |
| Benjamin Herrenschmidt | 10 | 4.59% | 1 | 8.33% |
| Ivan Kokshaysky | 9 | 4.13% | 2 | 16.67% |
| Martin Schwidefsky | 8 | 3.67% | 1 | 8.33% |
| Mike Rapoport | 5 | 2.29% | 1 | 8.33% |
| Al Viro | 3 | 1.38% | 1 | 8.33% |
| Greg Kroah-Hartman | 1 | 0.46% | 1 | 8.33% |
| Total | 218 | | 12 | |
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_PGALLOC_H
#define _ALPHA_PGALLOC_H

#include <linux/mm.h>
#include <linux/mmzone.h>

#include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
{
	pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
}
#define pmd_pgtable(pmd) pmd_page(pmd)

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
	pmd_set(pmd, pte);
}

static inline void
pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_set(pgd, pmd);
}

extern pgd_t *pgd_alloc(struct mm_struct *mm);

static inline void
pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

static inline pmd_t *
pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *ret = (pmd_t *)__get_free_page(GFP_PGTABLE_USER);
	return ret;
}

static inline void
pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

#define check_pgt_cache()	do { } while (0)

#endif /* _ALPHA_PGALLOC_H */
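
For orientation, here is a minimal sketch of how the generic mm layer might stitch these helpers together when a user address needs fresh page tables. It is not part of the header: the function name `alloc_user_pagetable` is hypothetical, the locking and `p*_present()` checks that the real fault path performs (along the lines of `__pmd_alloc()`/`__pte_alloc()` in `mm/memory.c`) are omitted, and it assumes the `pte_alloc_one(mm)` provided by `asm-generic/pgalloc.h` as noted in the include comment above.

```c
/*
 * Hypothetical sketch (not from the kernel tree): wire up a pmd and
 * a pte page for 'addr' using the Alpha helpers defined above.
 */
static int alloc_user_pagetable(struct mm_struct *mm, pgd_t *pgd,
				unsigned long addr)
{
	pmd_t *pmd;
	pgtable_t pte;

	pmd = pmd_alloc_one(mm, addr);	/* one page, GFP_PGTABLE_USER */
	if (!pmd)
		return -ENOMEM;
	pgd_populate(mm, pgd, pmd);	/* pgd entry now points at the pmd */

	pte = pte_alloc_one(mm);	/* from asm-generic/pgalloc.h */
	if (!pte)
		return -ENOMEM;	/* the populated pmd stays for next time */
	pmd_populate(mm, pmd, pte);	/* pmd entry now points at the pte page */
	return 0;
}
```

Note the asymmetry the sketch exercises: `pmd_populate()` receives a `pgtable_t` (a `struct page *`) and must first convert it to a usable pointer via `page_to_pa() + PAGE_OFFSET` (physical address, then its kernel direct-mapped virtual address) before handing it to `pmd_set()`, whereas `pmd_populate_kernel()` already receives a virtual `pte_t *` and stores it directly.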