Release 4.7 include/linux/compaction.h
#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H
/* Return values for compact_zone() and try_to_compact_pages() */
/* When adding new states, please adjust include/trace/events/compaction.h */
enum compact_result {
	/* For more detailed tracepoint output - internal to compaction */
	COMPACT_NOT_SUITABLE_ZONE,
	/*
	 * compaction didn't start as it was not possible or direct reclaim
	 * was more suitable
	 */
	COMPACT_SKIPPED,
	/* compaction didn't start as it was deferred due to past failures */
	COMPACT_DEFERRED,
	/* compaction not active last round */
	COMPACT_INACTIVE = COMPACT_DEFERRED,
	/* For more detailed tracepoint output - internal to compaction */
	COMPACT_NO_SUITABLE_PAGE,
	/* compaction should continue to another pageblock */
	COMPACT_CONTINUE,
	/*
	 * The full zone was scanned but compaction wasn't successful in
	 * compacting suitable pages.
	 */
	COMPACT_COMPLETE,
	/*
	 * direct compaction has scanned part of the zone but wasn't successful
	 * in compacting suitable pages.
	 */
	COMPACT_PARTIAL_SKIPPED,
	/* compaction terminated prematurely due to lock contentions */
	COMPACT_CONTENDED,
	/*
	 * direct compaction partially compacted a zone and there might be
	 * suitable pages
	 */
	COMPACT_PARTIAL,
};
/* Used to signal whether compaction detected need_sched() or lock contention */
/* No contention detected */
#define COMPACT_CONTENDED_NONE 0
/* Either need_sched() was true or fatal signal pending */
#define COMPACT_CONTENDED_SCHED 1
/* Zone lock or lru_lock was contended in async compaction */
#define COMPACT_CONTENDED_LOCK 2

/* Opaque here; only pointers to it are passed through this header. */
struct alloc_context; /* in mm/internal.h */
#ifdef CONFIG_COMPACTION
/* sysctl state and handlers for the compaction tunables */
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_compact_unevictable_allowed;

extern int fragmentation_index(struct zone *zone, unsigned int order);
/* Direct compaction entry point; returns an enum compact_result state. */
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
			unsigned int order,
			unsigned int alloc_flags, const struct alloc_context *ac,
			enum migrate_mode mode, int *contended);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern enum compact_result compaction_suitable(struct zone *zone, int order,
			unsigned int alloc_flags, int classzone_idx);
/* Deferral bookkeeping: back off after repeated per-zone failures. */
extern void defer_compaction(struct zone *zone, int order);
extern bool compaction_deferred(struct zone *zone, int order);
extern void compaction_defer_reset(struct zone *zone, int order,
			bool alloc_success);
extern bool compaction_restarting(struct zone *zone, int order);
/* Compaction has made some progress and retrying makes sense */
static inline bool compaction_made_progress(enum compact_result result)
{
	/*
	 * Even though this might sound confusing, COMPACT_PARTIAL in fact
	 * tells us that compaction successfully isolated and migrated some
	 * pageblocks, so a retry has a real chance to succeed.
	 */
	return result == COMPACT_PARTIAL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| michal hocko | 24 | 100.00% | 1 | 100.00% |
| Total | 24 | 100.00% | 1 | 100.00% |
/* Compaction has failed and it doesn't make much sense to keep retrying. */
static inline bool compaction_failed(enum compact_result result)
{
	/* All zones were scanned completely and still no result. */
	return result == COMPACT_COMPLETE;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| michal hocko | 24 | 100.00% | 1 | 100.00% |
| Total | 24 | 100.00% | 1 | 100.00% |
/*
 * Compaction has backed off for some reason. It might be throttling or
 * lock contention. Retrying is still worthwhile.
 */
static inline bool compaction_withdrawn(enum compact_result result)
{
	switch (result) {
	/*
	 * Compaction backed off due to watermark checks for order-0
	 * so the regular reclaim has to try harder and reclaim something.
	 */
	case COMPACT_SKIPPED:
	/*
	 * If compaction is deferred for high-order allocations, it is
	 * because sync compaction recently failed. If this is the case
	 * and the caller requested a THP allocation, we do not want
	 * to heavily disrupt the system, so we fail the allocation
	 * instead of entering direct reclaim.
	 */
	case COMPACT_DEFERRED:
	/*
	 * If compaction in async mode encounters contention or blocks higher
	 * priority task we back off early rather than cause stalls.
	 */
	case COMPACT_CONTENDED:
	/*
	 * Page scanners have met but we haven't scanned full zones so this
	 * is a back off in fact.
	 */
	case COMPACT_PARTIAL_SKIPPED:
		return true;
	default:
		return false;
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| michal hocko | 54 | 100.00% | 1 | 100.00% |
| Total | 54 | 100.00% | 1 | 100.00% |
/* Check whether any zone in the allocation context is compactable. */
bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
			int alloc_flags);

/* Per-node kcompactd kernel thread lifecycle and wakeup. */
extern int kcompactd_run(int nid);
extern void kcompactd_stop(int nid);
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
#else
/*
 * CONFIG_COMPACTION=n stub: compaction is compiled out, so report
 * COMPACT_CONTINUE without doing any work.
 *
 * alloc_flags is unsigned int so the stub's signature matches the real
 * declaration used when CONFIG_COMPACTION=y (see above); the two branches
 * previously disagreed (int vs unsigned int).
 */
static inline enum compact_result try_to_compact_pages(gfp_t gfp_mask,
			unsigned int order, unsigned int alloc_flags,
			const struct alloc_context *ac,
			enum migrate_mode mode, int *contended)
{
	return COMPACT_CONTINUE;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 20 | 57.14% | 3 | 42.86% |
| vlastimil babka | 10 | 28.57% | 2 | 28.57% |
| david rientjes | 3 | 8.57% | 1 | 14.29% |
| michal hocko | 2 | 5.71% | 1 | 14.29% |
| Total | 35 | 100.00% | 7 | 100.00% |
/* CONFIG_COMPACTION=n: compaction is compiled out; nothing to do. */
static inline void compact_pgdat(pg_data_t *pgdat, int order)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rik van riel | 11 | 84.62% | 1 | 50.00% |
| andrew morton | 2 | 15.38% | 1 | 50.00% |
| Total | 13 | 100.00% | 2 | 100.00% |
/* CONFIG_COMPACTION=n: no compaction state exists to reset. */
static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 10 | 100.00% | 1 | 100.00% |
| Total | 10 | 100.00% | 1 | 100.00% |
/*
 * CONFIG_COMPACTION=n stub: always reports COMPACT_SKIPPED since
 * compaction cannot run at all.
 *
 * alloc_flags is unsigned int so the stub's signature matches the real
 * declaration used when CONFIG_COMPACTION=y (see above); the two branches
 * previously disagreed (int vs unsigned int).
 */
static inline enum compact_result compaction_suitable(struct zone *zone, int order,
			unsigned int alloc_flags, int classzone_idx)
{
	return COMPACT_SKIPPED;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 17 | 68.00% | 1 | 33.33% |
| vlastimil babka | 6 | 24.00% | 1 | 33.33% |
| michal hocko | 2 | 8.00% | 1 | 33.33% |
| Total | 25 | 100.00% | 3 | 100.00% |
/* CONFIG_COMPACTION=n: deferral bookkeeping is compiled out; no-op. */
static inline void defer_compaction(struct zone *zone, int order)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 11 | 78.57% | 1 | 50.00% |
| rik van riel | 3 | 21.43% | 1 | 50.00% |
| Total | 14 | 100.00% | 2 | 100.00% |
/* CONFIG_COMPACTION=n: always report compaction as deferred. */
static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 14 | 77.78% | 1 | 33.33% |
| rik van riel | 3 | 16.67% | 1 | 33.33% |
| gavin shan | 1 | 5.56% | 1 | 33.33% |
| Total | 18 | 100.00% | 3 | 100.00% |
/* CONFIG_COMPACTION=n: compaction can never make progress. */
static inline bool compaction_made_progress(enum compact_result result)
{
	return false;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| michal hocko | 14 | 100.00% | 1 | 100.00% |
| Total | 14 | 100.00% | 1 | 100.00% |
/* CONFIG_COMPACTION=n: never report a definitive compaction failure. */
static inline bool compaction_failed(enum compact_result result)
{
	return false;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| michal hocko | 14 | 100.00% | 1 | 100.00% |
| Total | 14 | 100.00% | 1 | 100.00% |
/* CONFIG_COMPACTION=n: always report compaction as having backed off. */
static inline bool compaction_withdrawn(enum compact_result result)
{
	return true;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| michal hocko | 14 | 100.00% | 1 | 100.00% |
| Total | 14 | 100.00% | 1 | 100.00% |
/* CONFIG_COMPACTION=n: no kcompactd thread to start; report success. */
static inline int kcompactd_run(int nid)
{
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| vlastimil babka | 13 | 100.00% | 1 | 100.00% |
| Total | 13 | 100.00% | 1 | 100.00% |
/* CONFIG_COMPACTION=n: no kcompactd thread to stop. */
static inline void kcompactd_stop(int nid)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| vlastimil babka | 9 | 100.00% | 1 | 100.00% |
| Total | 9 | 100.00% | 1 | 100.00% |
/* CONFIG_COMPACTION=n: no kcompactd thread to wake. */
static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| vlastimil babka | 16 | 100.00% | 1 | 100.00% |
| Total | 16 | 100.00% | 1 | 100.00% |
#endif /* CONFIG_COMPACTION */
#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
/* Per-node compaction sysfs interface (CONFIG_COMPACTION && SYSFS && NUMA) */
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);
#else
/* Stub when compaction/sysfs/NUMA support is absent; report success. */
static inline int compaction_register_node(struct node *node)
{
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 15 | 100.00% | 1 | 100.00% |
| Total | 15 | 100.00% | 1 | 100.00% |
/* Stub when compaction/sysfs/NUMA support is absent; nothing to undo. */
static inline void compaction_unregister_node(struct node *node)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 11 | 100.00% | 1 | 100.00% |
| Total | 11 | 100.00% | 1 | 100.00% |
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */
#endif /* _LINUX_COMPACTION_H */
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 299 | 43.08% | 11 | 35.48% |
| michal hocko | 202 | 29.11% | 6 | 19.35% |
| vlastimil babka | 137 | 19.74% | 7 | 22.58% |
| rik van riel | 34 | 4.90% | 2 | 6.45% |
| joonsoo kim | 8 | 1.15% | 1 | 3.23% |
| david rientjes | 6 | 0.86% | 1 | 3.23% |
| eric b munson | 4 | 0.58% | 1 | 3.23% |
| andrew morton | 3 | 0.43% | 1 | 3.23% |
| gavin shan | 1 | 0.14% | 1 | 3.23% |
| Total | 694 | 100.00% | 31 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.