Contributors: 11

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| David Gibson | 89 | 28.80% | 4 | 17.39% |
| Aneesh Kumar K.V | 62 | 20.06% | 4 | 17.39% |
| Anton Blanchard | 53 | 17.15% | 4 | 17.39% |
| Benjamin Herrenschmidt | 35 | 11.33% | 3 | 13.04% |
| Christophe Leroy | 26 | 8.41% | 1 | 4.35% |
| Mark Nelson | 18 | 5.83% | 1 | 4.35% |
| Balbir Singh | 10 | 3.24% | 1 | 4.35% |
| Andrew Morton | 7 | 2.27% | 2 | 8.70% |
| Hari Bathini | 6 | 1.94% | 1 | 4.35% |
| Thomas Gleixner | 2 | 0.65% | 1 | 4.35% |
| Mike Rapoport | 1 | 0.32% | 1 | 4.35% |
| Total | 309 | | 23 | |

/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PGALLOC_64_H
#define _ASM_POWERPC_PGALLOC_64_H
/*
 * Page table allocation and freeing helpers for 64-bit powerpc.
 */
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
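
/*
 * Each vmemmap_backing entry records one physical page backing part of
 * the virtual memmap; entries are chained through @list, with
 * vmemmap_list pointing at the head of the chain.
 */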
struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;
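
/* Link a freshly allocated PUD table into a P4D entry. */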
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	p4d_set(p4d, (unsigned long)pud);
}
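
/* Allocate a PUD page table from its dedicated kmem cache. */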
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
			pgtable_gfp_flags(mm, GFP_KERNEL));
}
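
/* Return a PUD page table to its kmem cache. */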
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
}
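
/* Point a PUD entry at a newly allocated PMD table. */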
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, (unsigned long)pmd);
}
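
/* Populate a PMD entry with a kernel PTE table (raw pte_t pointer). */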
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, (unsigned long)pte);
}
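
/* Populate a PMD entry with a user PTE table described by pgtable_t. */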
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, (unsigned long)pte_page);
}
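
/* Allocate a PMD page table from its dedicated kmem cache. */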
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
			pgtable_gfp_flags(mm, GFP_KERNEL));
}
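
/* Return a PMD page table to its kmem cache. */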
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}
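
/*
 * Defer freeing of PMD/PUD tables via the mmu_gather machinery so the
 * pages are released only after the corresponding TLB entries have been
 * flushed.
 */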
#define __pmd_free_tlb(tlb, pmd, addr)		      \
	pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
#define __pud_free_tlb(tlb, pud, addr)		      \
	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
#endif /* _ASM_POWERPC_PGALLOC_64_H */