Release 4.7 include/linux/page-isolation.h
#ifndef __LINUX_PAGEISOLATION_H
#define __LINUX_PAGEISOLATION_H
#ifdef CONFIG_MEMORY_ISOLATION
/* Returns true if @zone currently holds at least one isolated pageblock. */
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return zone->nr_isolate_pageblock != 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| joonsoo kim | 17 | 100.00% | 1 | 100.00% |
| Total | 17 | 100.00% | 1 | 100.00% |
/* Returns true if @page belongs to a pageblock marked MIGRATE_ISOLATE. */
static inline bool is_migrate_isolate_page(struct page *page)
{
	int mt = get_pageblock_migratetype(page);

	return mt == MIGRATE_ISOLATE;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| minchan kim | 20 | 100.00% | 1 | 100.00% |
| Total | 20 | 100.00% | 1 | 100.00% |
static inline bool is_migrate_isolate(int migratetype)
{
return migratetype == MIGRATE_ISOLATE;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| minchan kim | 15 | 100.00% | 1 | 100.00% |
| Total | 15 | 100.00% | 1 | 100.00% |
#else
/* CONFIG_MEMORY_ISOLATION=n stub: no zone can ever hold isolated pageblocks. */
static inline bool has_isolate_pageblock(struct zone *zone)
{
return false;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| joonsoo kim | 15 | 100.00% | 1 | 100.00% |
| Total | 15 | 100.00% | 1 | 100.00% |
/* CONFIG_MEMORY_ISOLATION=n stub: no page is ever in an isolated pageblock. */
static inline bool is_migrate_isolate_page(struct page *page)
{
return false;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| minchan kim | 15 | 100.00% | 1 | 100.00% |
| Total | 15 | 100.00% | 1 | 100.00% |
/* CONFIG_MEMORY_ISOLATION=n stub: no migrate type is ever the isolate type. */
static inline bool is_migrate_isolate(int migratetype)
{
	(void)migratetype; /* unused when isolation is compiled out */
	return false;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| minchan kim | 13 | 100.00% | 1 | 100.00% |
| Total | 13 | 100.00% | 1 | 100.00% |
#endif
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
bool skip_hwpoisoned_pages);
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
int migratetype);
int move_freepages(struct zone *zone,
struct page *start_page, struct page *end_page,
int migratetype);
/*
 * Changes the migrate type of pages in [start_pfn, end_pfn) to MIGRATE_ISOLATE.
 * If the specified range includes migrate types other than MOVABLE or CMA,
 * this will fail with -EBUSY.
 *
 * To fully isolate all pages in the range, the caller has to free
 * all pages in the range. test_pages_isolated() can be used to
 * verify this.
 */
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned migratetype, bool skip_hwpoisoned_pages);
/*
* Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
* target range is [start_pfn, end_pfn)
*/
int
undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned migratetype);
/*
 * Test whether all pages in [start_pfn, end_pfn) are isolated.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
bool skip_hwpoisoned_pages);
struct page *alloc_migrate_target(struct page *page, unsigned long private,
int **resultp);
#endif
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| minchan kim | 158 | 62.70% | 4 | 50.00% |
| kamezawa hiroyuki | 45 | 17.86% | 1 | 12.50% |
| joonsoo kim | 32 | 12.70% | 1 | 12.50% |
| wen congyang | 9 | 3.57% | 1 | 12.50% |
| michal nazarewicz | 8 | 3.17% | 1 | 12.50% |
| Total | 252 | 100.00% | 8 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.