Contributors: 2

Author               Tokens   Token Proportion   Commits   Commit Proportion
Ingo Molnar             185             99.46%         2              66.67%
Greg Kroah-Hartman        1              0.54%         1              33.33%
Total                   186                            3
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_NUMA_BALANCING_H
#define _LINUX_SCHED_NUMA_BALANCING_H

/*
 * This is the interface between the scheduler and the MM that
 * implements memory access pattern based NUMA-balancing:
 */

#include <linux/sched.h>

/* Fault flags passed to task_numa_fault() to describe each hinting fault: */
#define TNF_MIGRATED     0x01	/* page was migrated to the faulting node */
#define TNF_NO_GROUP     0x02	/* do not group tasks based on this fault */
#define TNF_SHARED       0x04	/* account as a shared, not private, fault */
#define TNF_FAULT_LOCAL  0x08	/* fault was local to the task's node */
#define TNF_MIGRATE_FAIL 0x10	/* the page migration attempt failed */

#ifdef CONFIG_NUMA_BALANCING
/* Record a NUMA hinting fault of @pages pages on @node for the current task. */
extern void task_numa_fault(int last_node, int node, int pages, int flags);
/* Return the NUMA group id of @p, or 0 if it belongs to none. */
extern pid_t task_numa_group_id(struct task_struct *p);
/* Globally enable or disable NUMA balancing (e.g. from a sysctl handler). */
extern void set_numabalancing_state(bool enabled);
/* Release @p's NUMA fault statistics when the task goes away. */
extern void task_numa_free(struct task_struct *p);
/* Decide whether @page should migrate from @src_nid towards @dst_cpu's node. */
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
				       int src_nid, int dst_cpu);
#else
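/* !CONFIG_NUMA_BALANCING: no-op stubs so that callers need no #ifdefs. */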
static inline void task_numa_fault(int last_node, int node, int pages,
				   int flags)
{
}
static inline pid_t task_numa_group_id(struct task_struct *p)
{
	return 0;
}
static inline void set_numabalancing_state(bool enabled)
{
}
static inline void task_numa_free(struct task_struct *p)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
					      struct page *page, int src_nid, int dst_cpu)
{
	return true;
}
#endif

#endif /* _LINUX_SCHED_NUMA_BALANCING_H */
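
For orientation, the sketch below shows how the MM side might drive this interface when a NUMA hinting fault finds a possibly misplaced page. In the real kernel the callers live under mm/ (the hinting-fault and memory-policy code); the functions carrying a _sketch suffix and the exact control flow here are assumptions for illustration, not the kernel's actual code.

/*
 * Illustrative sketch only -- not the kernel's actual fault-path code.
 * try_migrate_page_sketch() is a hypothetical stand-in for the real
 * migration call (e.g. migrate_misplaced_page() in mm/migrate.c).
 */
#include <linux/sched.h>
#include <linux/sched/numa_balancing.h>
#include <linux/topology.h>

static bool try_migrate_page_sketch(struct page *page, int target_node)
{
	return false;	/* placeholder: pretend the migration attempt failed */
}

static void numa_hinting_fault_sketch(struct task_struct *p, struct page *page,
				      int last_node, int page_node, int dst_cpu)
{
	int flags = 0;
	int target_node = page_node;

	/* Ask the scheduler side whether migrating this page looks worthwhile. */
	if (should_numa_migrate_memory(p, page, page_node, dst_cpu)) {
		if (try_migrate_page_sketch(page, cpu_to_node(dst_cpu))) {
			flags |= TNF_MIGRATED;
			target_node = cpu_to_node(dst_cpu);
		} else {
			flags |= TNF_MIGRATE_FAIL;
		}
	}

	/* A fault serviced on the node we are already running on is "local". */
	if (target_node == numa_node_id())
		flags |= TNF_FAULT_LOCAL;

	/* Feed the outcome back into the scheduler's NUMA placement stats. */
	task_numa_fault(last_node, target_node, 1, flags);
}

In practice, should_numa_migrate_memory() is consulted when deciding whether a faulting page is misplaced, and task_numa_fault() accumulates the per-task fault statistics that the scheduler's NUMA placement logic later acts on.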