Contributors: 9

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Linus Torvalds (pre-git) | 71 | 33.33% | 2 | 16.67% |
| Richard Henderson | 67 | 31.46% | 1 | 8.33% |
| Linus Torvalds | 34 | 15.96% | 1 | 8.33% |
| Benjamin Herrenschmidt | 10 | 4.69% | 1 | 8.33% |
| Mike Rapoport | 10 | 4.69% | 2 | 16.67% |
| Ivan Kokshaysky | 9 | 4.23% | 2 | 16.67% |
| Martin Schwidefsky | 8 | 3.76% | 1 | 8.33% |
| Al Viro | 3 | 1.41% | 1 | 8.33% |
| Greg Kroah-Hartman | 1 | 0.47% | 1 | 8.33% |
| **Total** | **213** | | **12** | |
```c
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_PGALLOC_H
#define _ALPHA_PGALLOC_H

#include <linux/mm.h>
#include <linux/mmzone.h>

#include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
{
	/* Convert the pte page's physical address to its direct-mapped
	   kernel virtual address before storing it in the pmd entry. */
	pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
}
#define pmd_pgtable(pmd) pmd_page(pmd)

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
	pmd_set(pmd, pte);
}

static inline void
pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, pmd);
}

/* Defined in arch/alpha/mm/init.c. */
extern pgd_t *pgd_alloc(struct mm_struct *mm);

static inline void
pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

static inline pmd_t *
pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	/* One zeroed, memcg-accounted page (GFP_PGTABLE_USER is
	   GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT); 'address' is
	   unused but kept for the arch interface. */
	pmd_t *ret = (pmd_t *)__get_free_page(GFP_PGTABLE_USER);
	return ret;
}

static inline void
pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

#endif /* _ALPHA_PGALLOC_H */
```
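For context, these helpers are not usually called directly; the generic page-table code in mm/memory.c composes them when it faults in a missing mid-level table. The sketch below is illustrative only: the function name `pmd_install_sketch` is hypothetical, and the `mm->page_table_lock` locking is the classic pattern, simplified from the shape of the upstream `__pmd_alloc()`.

```c
/*
 * Hypothetical sketch, not part of this header: roughly how generic
 * mm code allocates and installs a pmd using the helpers above.
 * Simplified from the shape of __pmd_alloc() in mm/memory.c; the
 * function name and the exact locking are illustrative only.
 */
static int pmd_install_sketch(struct mm_struct *mm, pud_t *pud,
			      unsigned long address)
{
	pmd_t *new = pmd_alloc_one(mm, address);	/* zeroed page */

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (pud_present(*pud))
		pmd_free(mm, new);	/* raced: another thread installed one */
	else
		pud_populate(mm, pud, new);	/* pud_set() points the pud at the new pmd page */
	spin_unlock(&mm->page_table_lock);
	return 0;
}
```

On Alpha each table level occupies a single page, which is why `pgd_free()` and `pmd_free()` reduce to `free_page()`; pte pages come from the generic `pte_alloc_one()`/`pte_free()` pulled in from <asm-generic/pgalloc.h>.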