Contributors: 16
Author |
Tokens |
Token Proportion |
Commits |
Commit Proportion |
MinChan Kim |
60 |
29.56% |
3 |
13.64% |
Kamezawa Hiroyuki |
38 |
18.72% |
1 |
4.55% |
Michal Nazarewicz |
18 |
8.87% |
2 |
9.09% |
Mel Gorman |
16 |
7.88% |
4 |
18.18% |
JoonSoo Kim |
16 |
7.88% |
1 |
4.55% |
Yasunori Goto |
12 |
5.91% |
1 |
4.55% |
Cody P Schafer |
10 |
4.93% |
1 |
4.55% |
Michal Hocko |
9 |
4.43% |
1 |
4.55% |
Dave Hansen |
6 |
2.96% |
1 |
4.55% |
Zi Yan |
5 |
2.46% |
1 |
4.55% |
Nicholas Piggin |
4 |
1.97% |
1 |
4.55% |
David Hildenbrand |
3 |
1.48% |
1 |
4.55% |
Wen Congyang |
2 |
0.99% |
1 |
4.55% |
Vlastimil Babka |
2 |
0.99% |
1 |
4.55% |
Greg Kroah-Hartman |
1 |
0.49% |
1 |
4.55% |
Liu Ping Fan |
1 |
0.49% |
1 |
4.55% |
Total |
203 |
|
22 |
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGEISOLATION_H
#define __LINUX_PAGEISOLATION_H
#ifdef CONFIG_MEMORY_ISOLATION
/* True when the zone currently has at least one isolated pageblock. */
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return zone->nr_isolate_pageblock != 0;
}

/* True when @migratetype is the isolation migratetype. */
static inline bool is_migrate_isolate(int migratetype)
{
	return migratetype == MIGRATE_ISOLATE;
}

/* True when @page's pageblock carries the isolation migratetype. */
static inline bool is_migrate_isolate_page(struct page *page)
{
	return is_migrate_isolate(get_pageblock_migratetype(page));
}
#else
/* Without CONFIG_MEMORY_ISOLATION nothing is ever isolated. */
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return false;
}

static inline bool is_migrate_isolate_page(struct page *page)
{
	return false;
}

static inline bool is_migrate_isolate(int migratetype)
{
	return false;
}
#endif
/*
 * Flag bits for the isolation entry points below.  The power-of-two values
 * suggest an OR-able bitmask; NOTE(review): exact semantics live in the
 * implementation, not visible in this header — confirm there.
 */
#define MEMORY_OFFLINE 0x1
#define REPORT_FAILURE 0x2
/* Set the migratetype of the pageblock containing @page. */
void set_pageblock_migratetype(struct page *page, int migratetype);
/*
 * Move the free pages of @page's pageblock onto @migratetype's free list.
 * NOTE(review): presumably returns the number of pages moved and reports a
 * movable-page count via @num_movable — confirm against the definition.
 */
int move_freepages_block(struct zone *zone, struct page *page,
int migratetype, int *num_movable);
/*
 * Begin isolating the PFN range [start_pfn, end_pfn); @flags presumably
 * takes the MEMORY_OFFLINE/REPORT_FAILURE bits above.  Pair with
 * undo_isolate_page_range() to release the range.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
int migratetype, int flags, gfp_t gfp_flags);
/* Revert a prior start_isolate_page_range() over the same PFN range. */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
int migratetype);
/*
 * Check whether the pages in [start_pfn, end_pfn) are isolated; @isol_flags
 * presumably takes the flag bits above.  Return convention not visible here.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
int isol_flags);
#endif