cregit-Linux: how code gets into the kernel

Release 4.12 include/linux/compaction.h

Directory: include/linux
#ifndef _LINUX_COMPACTION_H

#define _LINUX_COMPACTION_H

/*
 * Determines how hard direct compaction should try to succeed.
 * A lower value means a higher priority, analogous to reclaim priority.
 */
enum compact_priority {
	COMPACT_PRIO_SYNC_FULL,
	MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_FULL,
	COMPACT_PRIO_SYNC_LIGHT,
	MIN_COMPACT_COSTLY_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
	DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
	COMPACT_PRIO_ASYNC,
	INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC
};

/* Return values for compact_zone() and try_to_compact_pages() */
/* When adding new states, please adjust include/trace/events/compaction.h */
enum compact_result {
	/* For more detailed tracepoint output - internal to compaction */
	COMPACT_NOT_SUITABLE_ZONE,

	/*
	 * compaction didn't start as it was not possible or direct reclaim
	 * was more suitable
	 */
	COMPACT_SKIPPED,

	/* compaction didn't start as it was deferred due to past failures */
	COMPACT_DEFERRED,

	/* compaction not active last round */
	COMPACT_INACTIVE = COMPACT_DEFERRED,

	/* For more detailed tracepoint output - internal to compaction */
	COMPACT_NO_SUITABLE_PAGE,

	/* compaction should continue to another pageblock */
	COMPACT_CONTINUE,

	/*
	 * The full zone was compacted and scanned but wasn't successful in
	 * compacting suitable pages.
	 */
	COMPACT_COMPLETE,

	/*
	 * direct compaction has scanned part of the zone but wasn't successful
	 * in compacting suitable pages.
	 */
	COMPACT_PARTIAL_SKIPPED,

	/* compaction terminated prematurely due to lock contention */
	COMPACT_CONTENDED,

	/*
	 * direct compaction terminated after concluding that the allocation
	 * should now succeed
	 */
	COMPACT_SUCCESS,
};

struct alloc_context; /* in mm/internal.h */

/*
 * Number of free order-0 pages that should be available above given watermark
 * to make sure compaction has reasonable chance of not running out of free
 * pages that it needs to isolate as migration target during its work.
 */
static inline unsigned long compact_gap(unsigned int order)
{
	/*
	 * Although all the isolations for migration are temporary, compaction
	 * free scanner may have up to 1 << order pages on its list and then
	 * try to split an (order - 1) free page. At that point, a gap of
	 * 1 << order might not be enough, so it's safer to require twice that
	 * amount. Note that the number of pages on the list is also
	 * effectively limited by COMPACT_CLUSTER_MAX, as that's the maximum
	 * that the migrate scanner can have isolated on migrate list, and free
	 * scanner is only invoked when the number of isolated free pages is
	 * lower than that. But it's not worth to complicate the formula here
	 * as a bigger gap for higher orders than strictly necessary can also
	 * improve chances of compaction success.
	 */
	return 2UL << order;
}

Contributors

PersonTokensPropCommitsCommitProp
Vlastimil Babka18100.00%1100.00%
Total18100.00%1100.00%

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_compact_unevictable_allowed;

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
		unsigned int order, unsigned int alloc_flags,
		const struct alloc_context *ac, enum compact_priority prio);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern enum compact_result compaction_suitable(struct zone *zone, int order,
		unsigned int alloc_flags, int classzone_idx);

extern void defer_compaction(struct zone *zone, int order);
extern bool compaction_deferred(struct zone *zone, int order);
extern void compaction_defer_reset(struct zone *zone, int order,
				bool alloc_success);
extern bool compaction_restarting(struct zone *zone, int order);

/* Compaction has made some progress and retrying makes sense */
static inline bool compaction_made_progress(enum compact_result result)
{
	/*
	 * Even though this might sound confusing this in fact tells us
	 * that the compaction successfully isolated and migrated some
	 * pageblocks.
	 */
	return result == COMPACT_SUCCESS;
}

Contributors

PersonTokensPropCommitsCommitProp
Michal Hocko2395.83%150.00%
Vlastimil Babka14.17%150.00%
Total24100.00%2100.00%

/* Compaction has failed and it doesn't make much sense to keep retrying. */
static inline bool compaction_failed(enum compact_result result)
{
	/* All zones were scanned completely and still no result. */
	return result == COMPACT_COMPLETE;
}

Contributors

PersonTokensPropCommitsCommitProp
Michal Hocko24100.00%1100.00%
Total24100.00%1100.00%

/* * Compaction has backed off for some reason. It might be throttling or * lock contention. Retrying is still worthwhile. */
static inline bool compaction_withdrawn(enum compact_result result) { /* * Compaction backed off due to watermark checks for order-0 * so the regular reclaim has to try harder and reclaim something. */ if (result == COMPACT_SKIPPED) return true; /* * If compaction is deferred for high-order allocations, it is * because sync compaction recently failed. If this is the case * and the caller requested a THP allocation, we do not want * to heavily disrupt the system, so we fail the allocation * instead of entering direct reclaim. */ if (result == COMPACT_DEFERRED) return true; /* * If compaction in async mode encounters contention or blocks higher * priority task we back off early rather than cause stalls. */ if (result == COMPACT_CONTENDED) return true; /* * Page scanners have met but we haven't scanned full zones so this * is a back off in fact. */ if (result == COMPACT_PARTIAL_SKIPPED) return true; return false; }

Contributors

PersonTokensPropCommitsCommitProp
Michal Hocko54100.00%1100.00%
Total54100.00%1100.00%

bool compaction_zonelist_suitable(struct alloc_context *ac, int order, int alloc_flags); extern int kcompactd_run(int nid); extern void kcompactd_stop(int nid); extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); #else
/* No-op stub used when CONFIG_COMPACTION is disabled. */
static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

Contributors

PersonTokensPropCommitsCommitProp
Mel Gorman10100.00%1100.00%
Total10100.00%1100.00%


/* With compaction disabled, every zone is always reported as skipped. */
static inline enum compact_result compaction_suitable(struct zone *zone,
		int order, int alloc_flags, int classzone_idx)
{
	return COMPACT_SKIPPED;
}

Contributors

PersonTokensPropCommitsCommitProp
Mel Gorman1768.00%133.33%
Vlastimil Babka624.00%133.33%
Michal Hocko28.00%133.33%
Total25100.00%3100.00%


/* No-op stub: there is no compaction state to defer when it is disabled. */
static inline void defer_compaction(struct zone *zone, int order)
{
}

Contributors

PersonTokensPropCommitsCommitProp
Mel Gorman1178.57%150.00%
Rik Van Riel321.43%150.00%
Total14100.00%2100.00%


/* With compaction disabled, always report it as deferred so callers skip it. */
static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

Contributors

PersonTokensPropCommitsCommitProp
Mel Gorman1477.78%133.33%
Rik Van Riel316.67%133.33%
Gavin Shan15.56%133.33%
Total18100.00%3100.00%


/* Compaction is compiled out, so it can never make progress. */
static inline bool compaction_made_progress(enum compact_result result)
{
	return false;
}

Contributors

PersonTokensPropCommitsCommitProp
Michal Hocko14100.00%1100.00%
Total14100.00%1100.00%


/* Compaction is compiled out, so it is never reported as failed. */
static inline bool compaction_failed(enum compact_result result)
{
	return false;
}

Contributors

PersonTokensPropCommitsCommitProp
Michal Hocko14100.00%1100.00%
Total14100.00%1100.00%


/* Compaction is compiled out; always treated as withdrawn. */
static inline bool compaction_withdrawn(enum compact_result result)
{
	return true;
}

Contributors

PersonTokensPropCommitsCommitProp
Michal Hocko14100.00%1100.00%
Total14100.00%1100.00%


/* kcompactd is not built; report success without starting a thread. */
static inline int kcompactd_run(int nid)
{
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Vlastimil Babka13100.00%1100.00%
Total13100.00%1100.00%


/* No-op stub: kcompactd is never started, so there is nothing to stop. */
static inline void kcompactd_stop(int nid)
{
}

Contributors

PersonTokensPropCommitsCommitProp
Vlastimil Babka9100.00%1100.00%
Total9100.00%1100.00%


/* No-op stub: there is no kcompactd thread to wake. */
static inline void wakeup_kcompactd(pg_data_t *pgdat, int order,
				    int classzone_idx)
{
}

Contributors

PersonTokensPropCommitsCommitProp
Vlastimil Babka16100.00%1100.00%
Total16100.00%1100.00%

#endif /* CONFIG_COMPACTION */ #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) struct node; extern int compaction_register_node(struct node *node); extern void compaction_unregister_node(struct node *node); #else
/* Sysfs/NUMA support is compiled out; registration trivially succeeds. */
static inline int compaction_register_node(struct node *node)
{
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Mel Gorman15100.00%1100.00%
Total15100.00%1100.00%


/* No-op stub: nothing was registered, so there is nothing to unregister. */
static inline void compaction_unregister_node(struct node *node)
{
}

Contributors

PersonTokensPropCommitsCommitProp
Mel Gorman11100.00%1100.00%
Total11100.00%1100.00%

#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */

Overall Contributors

PersonTokensPropCommitsCommitProp
Mel Gorman27541.48%1029.41%
Michal Hocko19729.71%617.65%
Vlastimil Babka16124.28%1132.35%
Rik Van Riel121.81%12.94%
JoonSoo Kim81.21%12.94%
Eric B Munson40.60%12.94%
MinChan Kim30.45%12.94%
Andrew Morton10.15%12.94%
David Rientjes10.15%12.94%
Gavin Shan10.15%12.94%
Total663100.00%34100.00%
Directory: include/linux
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.