/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
typedef void free_folio_t(struct folio *folio, unsigned long private);

struct migration_target_control;

/**
 * struct movable_operations - Driver page migration
 * @isolate_page:
 * The VM calls this function to prepare the page to be moved.  The page
 * is locked and the driver should not unlock it.  The driver should
 * return ``true`` if the page is movable and ``false`` if it is not
 * currently movable.  After this function returns, the VM uses the
 * page->lru field, so the driver must preserve any information which
 * is usually stored here.
 *
 * @migrate_page:
 * After isolation, the VM calls this function with the isolated
 * @src page.  The driver should copy the contents of the
 * @src page to the @dst page and set up the fields of @dst page.
 * Both pages are locked.
 * If page migration is successful, the driver should return 0.
 * If the driver cannot migrate the page at the moment, it can return
 * -EAGAIN.  The VM interprets this as a temporary migration failure and
 * will retry it later.  Any other error value is a permanent migration
 * failure and migration will not be retried.
 * The driver shouldn't touch the @src->lru field while in the
 * migrate_page() function.  It may write to @dst->lru.
 *
 * @putback_page:
 * If migration fails on the isolated page, the VM informs the driver
 * that the page is no longer a candidate for migration by calling
 * this function.  The driver should put the isolated page back into
 * its own data structure.
 */
struct movable_operations {
	bool (*isolate_page)(struct page *, isolate_mode_t);
	int (*migrate_page)(struct page *dst, struct page *src,
			enum migrate_mode);
	void (*putback_page)(struct page *);
};
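
/*
 * Illustrative sketch (not part of this header, not compiled): one way a
 * driver might fill in movable_operations per the contract documented
 * above.  The my_drv_* names, the lookup helper and the driver-private
 * list are hypothetical stand-ins for driver-specific bookkeeping; real
 * users include balloon drivers and zsmalloc.
 */
#if 0
static bool my_drv_isolate_page(struct page *page, isolate_mode_t mode)
{
	struct my_drv *drv = my_drv_lookup(page);	/* hypothetical */

	/*
	 * Take the page off the driver's own list; after we return true
	 * the VM owns page->lru, so nothing of ours may live there.
	 */
	spin_lock(&drv->lock);
	list_del_init(&page->lru);
	spin_unlock(&drv->lock);
	return true;
}

static int my_drv_migrate_page(struct page *dst, struct page *src,
			       enum migrate_mode mode)
{
	/* Copy contents and driver metadata from src to dst. */
	if (!my_drv_copy(dst, src))	/* hypothetical */
		return -EAGAIN;		/* temporary failure, VM retries */
	return 0;
}

static void my_drv_putback_page(struct page *page)
{
	struct my_drv *drv = my_drv_lookup(page);

	/* Migration failed: reinsert the page into the driver's list. */
	spin_lock(&drv->lock);
	list_add(&page->lru, &drv->pages);
	spin_unlock(&drv->lock);
}

static const struct movable_operations my_drv_mops = {
	.isolate_page	= my_drv_isolate_page,
	.migrate_page	= my_drv_migrate_page,
	.putback_page	= my_drv_putback_page,
};
#endif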

/* Defined in mm/debug.c: */
extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

void putback_movable_pages(struct list_head *l);
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode);
int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
		  unsigned long private, enum migrate_mode mode, int reason,
		  unsigned int *ret_succeeded);
struct folio *alloc_migration_target(struct folio *src, unsigned long private);
bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode);
bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
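
/*
 * Illustrative sketch (not part of this header, not compiled): migrating a
 * list of already-isolated folios to a target node with migrate_pages(),
 * using alloc_migration_target() as the new_folio_t callback.  The layout
 * of struct migration_target_control is mm-internal (mm/internal.h), so
 * the initializer below is an assumption, not a reference implementation.
 */
#if 0
static int example_migrate_list_to_node(struct list_head *folio_list, int nid)
{
	struct migration_target_control mtc = {
		.nid = nid,
		.gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL,
	};
	int ret;

	/*
	 * migrate_pages() returns 0 when everything was migrated, a
	 * positive count of folios left behind, or a negative errno.
	 * Whatever is still on the list must be put back by the caller.
	 */
	ret = migrate_pages(folio_list, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC,
			    MR_MEMORY_HOTPLUG, NULL);
	if (ret)
		putback_movable_pages(folio_list);
	return ret < 0 ? ret : 0;
}
#endif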

int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src);
void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
		__releases(ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count);
int set_movable_ops(const struct movable_operations *ops, enum pagetype type);
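
/*
 * Illustrative sketch (not part of this header, not compiled): the usual
 * shape of an address_space migrate_folio implementation built from the
 * two helpers above -- move the mapping, copy the data, transfer the
 * flags.  Most filesystems can simply use migrate_folio() directly; this
 * open-coded variant only sketches what that entails.
 */
#if 0
static int example_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	int rc;

	rc = folio_migrate_mapping(mapping, dst, src, 0);
	if (rc)			/* 0 on success, -EAGAIN on contention */
		return rc;

	folio_copy(dst, src);	/* declared in <linux/mm.h> */
	folio_migrate_flags(dst, src);
	return 0;
}
#endif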

#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_folio_t new,
		free_folio_t free, unsigned long private,
		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
	{ return -ENOSYS; }
static inline struct folio *alloc_migration_target(struct folio *src,
		unsigned long private)
	{ return NULL; }
static inline bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
	{ return false; }
static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
	{ return false; }

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct folio *dst, struct folio *src)
{
	return -ENOSYS;
}
static inline int set_movable_ops(const struct movable_operations *ops, enum pagetype type)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_NUMA_BALANCING
int migrate_misplaced_folio_prepare(struct folio *folio,
		struct vm_area_struct *vma, int node);
int migrate_misplaced_folio(struct folio *folio, int node);
#else
static inline int migrate_misplaced_folio_prepare(struct folio *folio,
		struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline int migrate_misplaced_folio(struct folio *folio, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */
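
/*
 * Illustrative sketch (not part of this header, not compiled): how the two
 * calls above are strung together, loosely following the NUMA hinting
 * fault path in mm/memory.c.  The folio/vma/target_nid plumbing is assumed
 * to exist in the caller.
 */
#if 0
static int example_numa_hint_fault(struct folio *folio,
		struct vm_area_struct *vma, int target_nid)
{
	int err;

	/* Isolate the folio; failure means "cannot migrate right now". */
	err = migrate_misplaced_folio_prepare(folio, vma, target_nid);
	if (err)
		return err;

	/*
	 * Move the isolated folio to the target node; on failure it is
	 * put back internally, so the caller need not clean up.
	 */
	return migrate_misplaced_folio(folio, target_nid);
}
#endif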

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where unsigned long is only 32 bits
 * wide but physical addresses can be wider: there might not be enough
 * bits to store both the full PFN and the flags below. So far we have
 * enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
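
/*
 * Illustrative sketch (not part of this header, not compiled): how the two
 * helpers above pack and unpack one entry of the migrate_vma src/dst
 * arrays, i.e. a PFN shifted above the flag bits.
 */
#if 0
static unsigned long example_encode_migratable(struct page *page)
{
	/* Shift the PFN up by MIGRATE_PFN_SHIFT and set VALID. */
	return migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
}

static struct page *example_decode(unsigned long mpfn)
{
	/* Returns NULL when MIGRATE_PFN_VALID is not set. */
	return migrate_pfn_to_page(mpfn);
}
#endif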

enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
	MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,
};

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both the src and dst arrays must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The caller must not modify the src array after migrate_vma_setup()
	 * returns, and must not modify the dst array after
	 * migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page_pgmap(page)->owner
	 * when migrating out of device private memory; flags must then
	 * include MIGRATE_VMA_SELECT_DEVICE_PRIVATE.  The caller should
	 * always set this field when using mmu notifier callbacks to
	 * avoid device MMU invalidations for device private pages that
	 * are not being migrated.
	 */
	void			*pgmap_owner;
	unsigned long		flags;

	/*
	 * Set to vmf->page if this is being called to migrate a page as part of
	 * a migrate_to_ram() callback.
	 */
	struct page		*fault_page;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
int migrate_device_range(unsigned long *src_pfns, unsigned long start,
			unsigned long npages);
int migrate_device_pfns(unsigned long *src_pfns, unsigned long npages);
void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
			unsigned long npages);
void migrate_device_finalize(unsigned long *src_pfns,
			unsigned long *dst_pfns, unsigned long npages);
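
/*
 * Illustrative sketch (not part of this header, not compiled): the
 * canonical three-phase migrate_vma flow -- setup, populate the dst array
 * with destination pages and copy the data, then migrate_vma_pages() and
 * migrate_vma_finalize().  my_dev_alloc_page() (assumed to return a fully
 * initialised destination page) and my_dev_copy_page() are hypothetical
 * stand-ins for driver-specific code.
 */
#if 0
static int example_migrate_range_to_device(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, void *dev_owner)
{
	unsigned long i, npages = (end - start) >> PAGE_SHIFT;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.end		= end,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
		.pgmap_owner	= dev_owner,
	};
	int ret;

	args.src = kcalloc(npages, sizeof(*args.src), GFP_KERNEL);
	args.dst = kcalloc(npages, sizeof(*args.dst), GFP_KERNEL);
	if (!args.src || !args.dst) {
		ret = -ENOMEM;
		goto out_free;
	}

	ret = migrate_vma_setup(&args);
	if (ret)
		goto out_free;

	for (i = 0; i < args.npages; i++) {
		struct page *dpage;

		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = my_dev_alloc_page();	/* hypothetical */
		if (!dpage)
			continue;	/* dst stays 0: page is not moved */

		my_dev_copy_page(dpage, migrate_pfn_to_page(args.src[i]));
		args.dst[i] = migrate_pfn(page_to_pfn(dpage));
	}

	/* Install the new pages, then drop references on the old ones. */
	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
out_free:
	kfree(args.src);
	kfree(args.dst);
	return ret;
}
#endif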

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */