Contributors: 14
Author                     Tokens  Token Proportion  Commits  Commit Proportion
Ian Campbell                   39            30.71%        1              4.76%
Ingo Molnar                    27            21.26%        3             14.29%
Kamezawa Hiroyuki              11             8.66%        2              9.52%
Mel Gorman                     10             7.87%        1              4.76%
Kirill A. Shutemov              9             7.09%        3             14.29%
Andrew Lutomirski               8             6.30%        1              4.76%
Jerome Marchand                 6             4.72%        1              4.76%
Andrew Morton                   5             3.94%        2              9.52%
Heiko Carstens                  3             2.36%        1              4.76%
Jeremy Fitzhardinge             3             2.36%        1              4.76%
Linus Torvalds (pre-git)        3             2.36%        2              9.52%
Sai Praneeth                    1             0.79%        1              4.76%
Barry Song                      1             0.79%        1              4.76%
Greg Kroah-Hartman              1             0.79%        1              4.76%
Total                         127           100.00%       21            100.00%


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_TASK_H
#define _LINUX_MM_TYPES_TASK_H

/*
 * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
 *
 * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
 */

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

#include <asm/page.h>

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
#include <asm/tlbbatch.h>
#endif

#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
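
/*
 * Illustrative sketch, not part of this header: ALLOC_SPLIT_PTLOCKS is true
 * when the configured spinlock (e.g. with lockdep enabled) is too large to be
 * embedded in the page-table page's metadata, so each split page-table lock
 * must be allocated separately and reached through a pointer. The kernel's
 * real helpers are ptlock_alloc()/ptlock_ptr() in include/linux/mm.h; the
 * example_ names below are hypothetical and only model the
 * embed-vs-allocate decision (assumes linux/spinlock.h is visible).
 */
struct example_ptlock_holder {
#if ALLOC_SPLIT_PTLOCKS
	spinlock_t *ptl;	/* too large to embed: allocated out of line */
#else
	spinlock_t ptl;		/* small enough to embed directly */
#endif
};

static inline spinlock_t *example_ptlock_ptr(struct example_ptlock_holder *h)
{
#if ALLOC_SPLIT_PTLOCKS
	return h->ptl;
#else
	return &h->ptl;
#endif
}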

/*
 * When updating this, please also update struct resident_page_types[] in
 * kernel/fork.c
 */
enum {
	MM_FILEPAGES,	/* Resident file mapping pages */
	MM_ANONPAGES,	/* Resident anonymous pages */
	MM_SWAPENTS,	/* Anonymous swap entries */
	MM_SHMEMPAGES,	/* Resident shared memory pages */
	NR_MM_COUNTERS
};
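
/*
 * Illustrative sketch, not part of this header: these counters index the
 * per-mm RSS statistics. This mirrors get_mm_rss() in include/linux/mm.h,
 * which sums the three resident counters; MM_SWAPENTS is excluded because
 * swap entries are not resident. Assumes get_mm_counter() and struct
 * mm_struct are visible (linux/mm.h).
 */
static inline unsigned long example_get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
	       get_mm_counter(mm, MM_ANONPAGES) +
	       get_mm_counter(mm, MM_SHMEMPAGES);
}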

struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};
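
/*
 * Illustrative sketch, not part of this header: struct page_frag describes a
 * partially consumed page ('size' bytes total, 'offset' bytes already used),
 * e.g. task_struct::task_frag used for socket sends. A hypothetical helper
 * carving a fragment out of the current page could look like this; the real
 * refill/consume logic lives in callers such as sk_page_frag_refill().
 * Assumes page_address() is visible (linux/mm.h).
 */
static inline void *example_page_frag_carve(struct page_frag *pfrag,
					    unsigned int len)
{
	void *va;

	if (pfrag->offset + len > pfrag->size)
		return NULL;		/* no room left; caller must refill */

	va = page_address(pfrag->page) + pfrag->offset;
	pfrag->offset += len;		/* consume len bytes of the page */
	return va;
}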

/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/*
	 * The arch code makes the following promise: generic code can modify a
	 * PTE, then call arch_tlbbatch_add_pending() (which internally provides
	 * all needed barriers), then call arch_tlbbatch_flush(), and the entries
	 * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
	 * returns.
	 */
	struct arch_tlbflush_unmap_batch arch;

	/* True if a flush is needed. */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
#endif
};
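
/*
 * Illustrative sketch, not part of this header: a hypothetical caller
 * honouring the contract documented above. The real users are
 * set_tlb_ubc_flush_pending() and try_to_unmap_flush() in mm/rmap.c; the
 * example_ names and the exact arch_tlbbatch_add_pending() arguments shown
 * here are assumptions, and the code is only meaningful when
 * CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH is enabled.
 */
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
static void example_queue_tlb_flush(struct tlbflush_unmap_batch *batch,
				    struct mm_struct *mm, unsigned long uaddr,
				    pte_t pteval)
{
	/* The PTE has already been cleared (e.g. via ptep_get_and_clear()). */
	arch_tlbbatch_add_pending(&batch->arch, mm, uaddr);
	batch->flush_required = true;

	/* A dirty PTE must be flushed before I/O is started on the page. */
	if (pte_dirty(pteval))
		batch->writable = true;
}

static void example_flush_tlb_batch(struct tlbflush_unmap_batch *batch)
{
	if (!batch->flush_required)
		return;

	/* All queued entries are flushed on every CPU once this returns. */
	arch_tlbbatch_flush(&batch->arch);
	batch->flush_required = false;
	batch->writable = false;
}
#endif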

#endif /* _LINUX_MM_TYPES_TASK_H */