Contributors: 31

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---:|---:|---:|---:|
| Christoph Lameter | 220 | 25.70% | 4 | 6.90% |
| Linus Torvalds (pre-git) | 165 | 19.28% | 9 | 15.52% |
| Peter Zijlstra | 111 | 12.97% | 5 | 8.62% |
| James Bottomley | 67 | 7.83% | 4 | 6.90% |
| Mel Gorman | 65 | 7.59% | 2 | 3.45% |
| Andrew Morton | 60 | 7.01% | 4 | 6.90% |
| Linus Torvalds | 22 | 2.57% | 2 | 3.45% |
| Jeremy Fitzhardinge | 19 | 2.22% | 1 | 1.72% |
| Geert Uytterhoeven | 18 | 2.10% | 1 | 1.72% |
| Ingo Molnar | 11 | 1.29% | 1 | 1.72% |
| Andi Kleen | 10 | 1.17% | 1 | 1.72% |
| Atsushi Nemoto | 10 | 1.17% | 2 | 3.45% |
| Russell King | 10 | 1.17% | 2 | 3.45% |
| Matthew Wilcox | 7 | 0.82% | 1 | 1.72% |
| Vivek Goyal | 6 | 0.70% | 1 | 1.72% |
| David Hildenbrand | 6 | 0.70% | 1 | 1.72% |
| Américo Wang | 6 | 0.70% | 3 | 5.17% |
| Cesar Eduardo Barros | 6 | 0.70% | 1 | 1.72% |
| Nate Diller | 6 | 0.70% | 1 | 1.72% |
| Khalid Aziz | 5 | 0.58% | 1 | 1.72% |
| David Mosberger-Tang | 4 | 0.47% | 1 | 1.72% |
| Michael Ellerman | 4 | 0.47% | 1 | 1.72% |
| Paul Gortmaker | 3 | 0.35% | 1 | 1.72% |
| Kumar Gala | 3 | 0.35% | 1 | 1.72% |
| Catalin Marinas | 3 | 0.35% | 1 | 1.72% |
| Richard Henderson | 3 | 0.35% | 1 | 1.72% |
| Akinobu Mita | 2 | 0.23% | 1 | 1.72% |
| Dave Jones | 1 | 0.12% | 1 | 1.72% |
| Andreas Fenkart | 1 | 0.12% | 1 | 1.72% |
| Chintan Pandya | 1 | 0.12% | 1 | 1.72% |
| Greg Kroah-Hartman | 1 | 0.12% | 1 | 1.72% |
| Total | 856 | | 58 | |
 
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/cacheflush.h>
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
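/*
 * Where the architecture has aliasing (e.g. virtually indexed) caches,
 * flush_anon_page() flushes the user mapping of an anonymous page so
 * that the kernel sees up-to-date data when it accesses the page, for
 * example via get_user_pages(); on all other architectures it is the
 * no-op below.
 */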
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif
#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
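/*
 * flush_kernel_dcache_page() makes kernel-side writes to a page
 * visible before the page is handed back to user space, and the two
 * vmap-range helpers bracket I/O to virtually mapped buffers; on
 * architectures without aliasing caches all three are the no-ops
 * below.
 */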
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif
#include <asm/kmap_types.h>
#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;
void kmap_flush_unused(void);
struct page *kmap_to_page(void *addr);
#else /* CONFIG_HIGHMEM */
static inline unsigned int nr_free_highpages(void) { return 0; }
static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}
#define totalhigh_pages 0UL
#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}
static inline void kunmap(struct page *page)
{
}
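/*
 * Illustrative usage sketch (not part of the original header): kmap()
 * may sleep, so it is valid only in process context, and every kmap()
 * must be paired with a kunmap() of the same page:
 *
 *	void *vaddr = kmap(page);
 *	memcpy(vaddr, buf, len);
 *	kunmap(page);
 */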
static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
	preempt_enable();
}
#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_flush_unused()	do {} while (0)
#endif
#endif /* CONFIG_HIGHMEM */
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
DECLARE_PER_CPU(int, __kmap_atomic_idx);
static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}
static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}
static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);
	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}
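/*
 * The per-CPU __kmap_atomic_idx acts as a stack pointer into the
 * per-CPU fixmap slots. An arch kmap_atomic() implementation does
 * roughly the following (illustrative sketch only; details vary per
 * architecture):
 *
 *	type = kmap_atomic_idx_push();
 *	idx = type + KM_TYPE_NR * smp_processor_id();
 *	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 */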
#endif
/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() should get the return value of kmap_atomic(), not
 * the page.
 */
#define kunmap_atomic(addr)                                     \
do {                                                            \
	BUILD_BUG_ON(__same_type((addr), struct page *));       \
	__kunmap_atomic(addr);                                  \
} while (0)
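/*
 * Example (illustrative, not part of the original header): the address
 * returned by kmap_atomic(), not the page, is what kunmap_atomic()
 * takes; passing a struct page * trips the BUILD_BUG_ON() above at
 * compile time:
 *
 *	char *vaddr = kmap_atomic(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap_atomic(vaddr);	(correct)
 *	kunmap_atomic(page);	(build error)
 */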
/* when CONFIG_HIGHMEM is not set these will be plain clear_page()/copy_page() */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif
#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is
 * expected to specify via @movableflags whether the page will be
 * movable in the future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);
	if (page)
		clear_user_highpage(page, vaddr);
	return page;
}
#endif
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows
 * will be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
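/*
 * Illustrative caller sketch (not part of the original header): an
 * anonymous-fault path allocating a zeroed, movable page for a
 * faulting address:
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */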
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}
static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);
	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);
	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}
static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}
static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
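/*
 * Example (illustrative): a filesystem zeroing the part of the last
 * page that lies beyond EOF, where "offset" (hypothetical) is the
 * first byte past EOF within the page:
 *
 *	zero_user_segment(page, offset, PAGE_SIZE);
 */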
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;
	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}
#endif
#ifndef __HAVE_ARCH_COPY_HIGHPAGE
static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;
	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}
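/*
 * Illustrative usage (not part of the original header): page migration
 * copies the contents of the old page into the newly allocated one:
 *
 *	copy_highpage(newpage, page);
 */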
#endif
#endif /* _LINUX_HIGHMEM_H */