Release 4.7 include/linux/migrate.h
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H
#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
typedef struct page *new_page_t(struct page *page, unsigned long private,
				int **reason);
typedef void free_page_t(struct page *page, unsigned long private);
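The two typedefs above are the allocator/free callback pair that migrate_pages() drives: the first must hand back a target page for a given source page, the second disposes of target pages that end up unused. As a hedged sketch (not part of this header; example_new_page, example_free_page and the allocation policy are illustrative assumptions), a caller might supply:

static struct page *example_new_page(struct page *page, unsigned long private,
				     int **reason)
{
	/* Illustrative policy: allocate the target on the source's node. */
	return alloc_pages_node(page_to_nid(page), GFP_HIGHUSER_MOVABLE, 0);
}

static void example_free_page(struct page *page, unsigned long private)
{
	/* Release a target page that was allocated but never used. */
	__free_page(page);
}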
/*
 * Return values from address_space_operations.migratepage():
 *   - negative errno on page migration failure;
 *   - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS 0
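For illustration only (example_aops is a hypothetical name, and struct address_space_operations comes from linux/fs.h): an address_space whose pages carry no private state can point its migratepage method straight at the generic migrate_page() helper declared further down, which returns MIGRATEPAGE_SUCCESS or a negative errno.

static const struct address_space_operations example_aops = {
	/* Generic helper; sufficient when pages have no private data. */
	.migratepage	= migrate_page,
};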
enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA,
	MR_TYPES
};
/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
extern char *migrate_reason_names[MR_TYPES];
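migrate_reason_names maps each enum migrate_reason value to a printable string for human-readable debug output; a hedged one-liner of the obvious use:

	pr_debug("migration reason: %s\n", migrate_reason_names[MR_COMPACTION]);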
#ifdef CONFIG_MIGRATION
extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count);
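Putting the pieces together, a hedged sketch of the usual calling pattern (example_migrate_list is hypothetical and reuses the example_* callbacks sketched above): isolate pages onto a private list, hand the list to migrate_pages(), and put back whatever could not be migrated.

static int example_migrate_list(struct list_head *pagelist)
{
	int ret;

	ret = migrate_pages(pagelist, example_new_page, example_free_page,
			    0, MIGRATE_SYNC, MR_SYSCALL);
	if (ret)
		/* Some pages were not migrated; return them to the LRU. */
		putback_movable_pages(pagelist);
	return ret;
}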
#else
static inline void putback_movable_pages(struct list_head *l) {}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rafael aquini | 11 | 100.00% | 1 | 100.00% |
| Total | 11 | 100.00% | 1 | 100.00% |
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }

Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 17 | 51.52% | 3 | 42.86% |
| mel gorman | 6 | 18.18% | 2 | 28.57% |
| naoya horiguchi | 6 | 18.18% | 1 | 14.29% |
| david rientjes | 4 | 12.12% | 1 | 14.29% |
| Total | 33 | 100.00% | 7 | 100.00% |
static inline int migrate_prep(void) { return -ENOSYS; }
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 11 | 84.62% | 1 | 50.00% |
| mel gorman | 2 | 15.38% | 1 | 50.00% |
| Total | 13 | 100.00% | 2 | 100.00% |
static inline int migrate_prep_local(void) { return -ENOSYS; }
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 11 | 84.62% | 1 | 50.00% |
| christoph lameter | 2 | 15.38% | 1 | 50.00% |
| Total | 13 | 100.00% | 2 | 100.00% |

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| naoya horiguchi | 16 | 100.00% | 1 | 100.00% |
| Total | 16 | 100.00% | 1 | 100.00% |

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| naoya horiguchi | 26 | 100.00% | 2 | 100.00% |
| Total | 26 | 100.00% | 2 | 100.00% |
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}

Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 13 | 100.00% | 1 | 100.00% |
| Total | 13 | 100.00% | 1 | 100.00% |
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}

Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| peter zijlstra | 20 | 80.00% | 1 | 50.00% |
| mel gorman | 5 | 20.00% | 1 | 50.00% |
| Total | 25 | 100.00% | 2 | 100.00% |
#endif /* CONFIG_NUMA_BALANCING */
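As a hedged sketch of how a NUMA hinting fault might drive migrate_misplaced_page() (example_numa_fault is hypothetical; the real caller is the fault path in mm/memory.c):

static bool example_numa_fault(struct page *page, struct vm_area_struct *vma,
			       int target_nid)
{
	/*
	 * Ask for the page to be moved to the faulting node.  If it
	 * cannot be isolated right now, nothing is lost: the next
	 * hinting fault simply tries again.
	 */
	return migrate_misplaced_page(page, vma, target_nid) != 0;
}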
#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}

Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 40 | 100.00% | 1 | 100.00% |
| Total | 40 | 100.00% | 1 | 100.00% |
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* _LINUX_MIGRATE_H */
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 177 | 34.84% | 9 | 31.03% |
| christoph lameter | 113 | 22.24% | 7 | 24.14% |
| naoya horiguchi | 82 | 16.14% | 2 | 6.90% |
| peter zijlstra | 38 | 7.48% | 1 | 3.45% |
| gu zheng | 29 | 5.71% | 1 | 3.45% |
| rafael aquini | 24 | 4.72% | 2 | 6.90% |
| david rientjes | 24 | 4.72% | 1 | 3.45% |
| vlastimil babka | 11 | 2.17% | 1 | 3.45% |
| andrew morton | 3 | 0.59% | 1 | 3.45% |
| benjamin lahaise | 3 | 0.59% | 1 | 3.45% |
| hugh dickins | 2 | 0.39% | 1 | 3.45% |
| konstantin khlebnikov | 1 | 0.20% | 1 | 3.45% |
| minchan kim | 1 | 0.20% | 1 | 3.45% |
| Total | 508 | 100.00% | 29 | 100.00% |