Contributors: 20

Author                    Tokens  Token %   Commits  Commit %
MinChan Kim                   66      29.86%        4        13.79%
Kamezawa Hiroyuki             35      15.84%        1         3.45%
Michal Nazarewicz             21       9.50%        3        10.34%
JoonSoo Kim                   16       7.24%        1         3.45%
Mel Gorman                    16       7.24%        4        13.79%
Yasunori Goto                 12       5.43%        1         3.45%
Cody P Schafer                10       4.52%        1         3.45%
Michal Hocko                   9       4.07%        1         3.45%
Dave Hansen                    6       2.71%        1         3.45%
Linus Torvalds (pre-git)       6       2.71%        2         6.90%
Zi Yan                         5       2.26%        1         3.45%
Nicholas Piggin                4       1.81%        1         3.45%
Hugh Dickins                   4       1.81%        1         3.45%
David Hildenbrand              3       1.36%        1         3.45%
Vlastimil Babka                2       0.90%        1         3.45%
Wen Congyang                   2       0.90%        1         3.45%
Qian Cai                       1       0.45%        1         3.45%
Harvey Harrison                1       0.45%        1         3.45%
Liu Ping Fan                   1       0.45%        1         3.45%
Greg Kroah-Hartman             1       0.45%        1         3.45%
Total                        221                   29


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGEISOLATION_H
#define __LINUX_PAGEISOLATION_H

#ifdef CONFIG_MEMORY_ISOLATION
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return zone->nr_isolate_pageblock;
}
static inline bool is_migrate_isolate_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
}
static inline bool is_migrate_isolate(int migratetype)
{
	return migratetype == MIGRATE_ISOLATE;
}
#else
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return false;
}
static inline bool is_migrate_isolate_page(struct page *page)
{
	return false;
}
static inline bool is_migrate_isolate(int migratetype)
{
	return false;
}
#endif /* CONFIG_MEMORY_ISOLATION */
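
/*
 * Illustrative sketch, not part of the original header: how a caller with
 * both a zone and a page in hand might combine the helpers above.  The
 * function below is hypothetical; only has_isolate_pageblock() and
 * is_migrate_isolate_page() are declared in this file, and struct zone /
 * struct page come from <linux/mmzone.h>.
 */
static inline bool page_in_isolated_pageblock(struct zone *zone,
					      struct page *page)
{
	/*
	 * Cheap check first: the zone keeps a count of isolated pageblocks,
	 * so the common case returns without reading the pageblock bitmap.
	 */
	if (!has_isolate_pageblock(zone))
		return false;

	/* Otherwise look up the migratetype of the page's pageblock. */
	return is_migrate_isolate_page(page);
}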

/*
 * Isolation modes passed in the flags of start_isolate_page_range() and
 * test_pages_isolated():
 * MEMORY_OFFLINE - the range is being isolated for memory offlining, so
 *		    HWPoison and PageOffline pages are treated as isolated.
 * REPORT_FAILURE - report details about pages that could not be isolated.
 */
#define MEMORY_OFFLINE	0x1
#define REPORT_FAILURE	0x2

/*
 * Pageblock helpers implemented in mm/page_alloc.c:
 * set_pageblock_migratetype() updates the migratetype recorded in the
 * pageblock bitmap; move_freepages_block() moves the free pages of the
 * pageblock containing @page to the free list of @migratetype.
 */
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype, int *num_movable);
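
/*
 * Illustrative sketch, not part of the original header: conceptually,
 * isolating a single pageblock combines the two primitives above -- retag
 * the pageblock as MIGRATE_ISOLATE, then drain its free pages onto the
 * MIGRATE_ISOLATE free list so the allocator stops handing them out.  The
 * helper below is hypothetical and assumes CONFIG_MEMORY_ISOLATION; the
 * real work happens in mm/page_isolation.c, which additionally holds
 * zone->lock and rejects pageblocks that contain unmovable pages.
 */
static void isolate_pageblock_sketch(struct zone *zone, struct page *page)
{
	/* Retag the pageblock so new allocations skip it. */
	set_pageblock_migratetype(page, MIGRATE_ISOLATE);

	/*
	 * Drain the pageblock's free pages onto the MIGRATE_ISOLATE free
	 * list; the real code also bumps zone->nr_isolate_pageblock and
	 * fixes up the zone's free-page accounting.
	 */
	move_freepages_block(zone, page, MIGRATE_ISOLATE, NULL);
}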

/*
 * Change the migratetype of all pageblocks in [start_pfn, end_pfn) to
 * MIGRATE_ISOLATE.  Returns 0 on success, or a negative error if part of
 * the range could not be isolated.
 */
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			 int migratetype, int flags, gfp_t gfp_flags);

/*
 * Change pageblocks in the target range [start_pfn, end_pfn) from
 * MIGRATE_ISOLATE back to @migratetype.
 */
void
undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			int migratetype);

/*
 * Test whether all pages in [start_pfn, end_pfn) are isolated; returns 0
 * if they are, a negative error otherwise.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags);

/* Allocation callback for migrating pages out of an isolated range. */
struct page *alloc_migrate_target(struct page *page, unsigned long private);
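
/*
 * Illustrative sketch, not part of the original header: the typical calling
 * pattern for the three range functions above, as used by memory offlining
 * and alloc_contig_range().  The function and its parameters are
 * hypothetical; the migration step (migrate_pages() with
 * alloc_migrate_target() as the allocation callback), retries and most
 * error handling are elided, and GFP_KERNEL is assumed from <linux/gfp.h>.
 */
static int isolate_and_drain_range_sketch(unsigned long start_pfn,
					  unsigned long end_pfn)
{
	int ret;

	/* Tag every pageblock in the range MIGRATE_ISOLATE. */
	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				       MEMORY_OFFLINE, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... migrate any pages in the range that are still in use ... */

	/* 0 if every page in the range is now free and isolated. */
	ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);

	/* Return the pageblocks to MIGRATE_MOVABLE when done. */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);

	return ret;
}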

#endif /* __LINUX_PAGEISOLATION_H */