
Release 4.15 include/linux/mempolicy.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/mmzone.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
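A short, hypothetical illustration of these lifetime rules (not part of this
header): it assumes that mpol_dup() reports allocation failure via ERR_PTR()
and omits the locking normally taken around the source task's policy.

/* Hypothetical example only: duplicate a policy, use it, then drop it. */
static int example_use_policy(struct task_struct *tsk)
{
	struct mempolicy *pol = mpol_dup(tsk->mempolicy);	/* refcnt == 1 */

	if (IS_ERR(pol))
		return PTR_ERR(pol);

	/* ... make allocation decisions based on *pol ... */

	mpol_put(pol);		/* drop the reference taken by mpol_dup() */
	return 0;
}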

struct mempolicy {
	atomic_t refcnt;
	unsigned short mode; 	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short 		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);

static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Andrew Morton       19      90.48%   1        50.00%
Lee Schermerhorn    2       9.52%    1        50.00%
Total               21      100.00%  2        100.00%

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Lee Schermerhorn    25      100.00%  1        100.00%
Total               25      100.00%  1        100.00%


static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Lee Schermerhorn    24      100.00%  1        100.00%
Total               24      100.00%  1        100.00%

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Andrew Morton       26      92.86%   1        50.00%
Lee Schermerhorn    2       7.14%    1        50.00%
Total               28      100.00%  2        100.00%

#define vma_policy(vma) ((vma)->vm_policy)
static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Andrew Morton       24      100.00%  1        100.00%
Total               24      100.00%  1        100.00%

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Andrew Morton       32      94.12%   1        50.00%
Motohiro Kosaki     2       5.88%    1        50.00%
Total               34      100.00%  2        100.00%

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */
struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	rwlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info, struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern int huge_node(struct vm_area_struct *vma, unsigned long addr,
				gfp_t gfp_flags, struct mempolicy **mpol,
				nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;
static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Christoph Lameter   21      84.00%   2        66.67%
Mel Gorman          4       16.00%   1        33.33%
Total               25      100.00%  3        100.00%
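A minimal sketch of how a filesystem might use the shared-policy tree declared
above; the inode structure and function names here are hypothetical, but the
calls mirror the declared API, with lookups keyed by page index rather than
byte offset so that objects larger than ULONG_MAX bytes can be covered.

/* Hypothetical example only: embed a shared_policy per object and look it up. */
struct example_inode_info {
	struct shared_policy policy;	/* rbtree of sp_node, indexed in pages */
};

static struct mempolicy *example_get_policy(struct example_inode_info *info,
					    pgoff_t pgoff)
{
	return mpol_shared_policy_lookup(&info->policy, pgoff);
}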

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);

/* Check if a vma is migratable */
static inline bool vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return false;

	/*
	 * DAX device mappings require predictable access latency, so avoid
	 * incurring periodic faults.
	 */
	if (vma_is_dax(vma))
		return false;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if (vma->vm_flags & VM_HUGETLB)
		return false;
#endif

	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
		return false;
	return true;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Gerald Schaefer     50      61.73%   1        25.00%
Naoya Horiguchi     15      18.52%   1        25.00%
Dan J Williams      11      13.58%   1        25.00%
Yaowei Bai          5       6.17%    1        25.00%
Total               81      100.00%  4        100.00%

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

#else

struct mempolicy {};
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b) { return true; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
Andrew Morton       18      90.00%   1        50.00%
Motohiro Kosaki     2       10.00%   1        50.00%
Total               20      100.00%  2        100.00%


static inline void mpol_put(struct mempolicy *p) { }

Contributors

Person              Tokens  Prop     Commits  CommitProp
Andrew Morton       10      90.91%   1        50.00%
Lee Schermerhorn    1       9.09%    1        50.00%
Total               11      100.00%  2        100.00%


static inline void mpol_cond_put(struct mempolicy *pol) { }

Contributors

Person              Tokens  Prop     Commits  CommitProp
Lee Schermerhorn    11      100.00%  1        100.00%
Total               11      100.00%  1        100.00%


static inline void mpol_get(struct mempolicy *pol) { }

Contributors

Person              Tokens  Prop     Commits  CommitProp
Andrew Morton       11      100.00%  1        100.00%
Total               11      100.00%  1        100.00%

struct shared_policy {};
static inline void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) { }

Contributors

Person              Tokens  Prop     Commits  CommitProp
Andrew Morton       10      62.50%   1        25.00%
Lee Schermerhorn    4       25.00%   1        25.00%
David Rientjes      1       6.25%    1        25.00%
Robin Holt          1       6.25%    1        25.00%
Total               16      100.00%  4        100.00%


static inline void mpol_free_shared_policy(struct shared_policy *p) { }

Contributors

Person              Tokens  Prop     Commits  CommitProp
Andrew Morton       11      100.00%  1        100.00%
Total               11      100.00%  1        100.00%


static inline struct mempolicy * mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) { return NULL; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
Hugh Dickins        21      100.00%  1        100.00%
Total               21      100.00%  1        100.00%

#define vma_policy(vma) NULL
static inline int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) { return 0; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
Oleg Nesterov       17      85.00%   1        50.00%
Andrew Morton       3       15.00%   1        50.00%
Total               20      100.00%  2        100.00%


static inline void numa_policy_init(void) { }

Contributors

Person              Tokens  Prop     Commits  CommitProp
Andi Kleen          8       100.00%  1        100.00%
Total               8       100.00%  1        100.00%


static inline void numa_default_policy(void) { }

Contributors

Person              Tokens  Prop     Commits  CommitProp
Andi Kleen          8       100.00%  1        100.00%
Total               8       100.00%  1        100.00%


static inline void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) { }

Contributors

Person              Tokens  Prop     Commits  CommitProp
Paul Jackson        16      100.00%  2        100.00%
Total               16      100.00%  2        100.00%


static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) { }

Contributors

Person              Tokens  Prop     Commits  CommitProp
Paul Jackson        15      100.00%  1        100.00%
Total               15      100.00%  1        100.00%


static inline int huge_node(struct vm_area_struct *vma, unsigned long addr,
				gfp_t gfp_flags, struct mempolicy **mpol,
				nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return 0;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Mel Gorman          18      41.86%   2        40.00%
Christoph Lameter   17      39.53%   1        20.00%
Lee Schermerhorn    6       13.95%   1        20.00%
Vlastimil Babka     2       4.65%    1        20.00%
Total               43      100.00%  5        100.00%


static inline bool init_nodemask_of_mempolicy(nodemask_t *m) { return false; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
Lee Schermerhorn    14      100.00%  1        100.00%
Total               14      100.00%  1        100.00%


static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, int flags) { return 0; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
Paul Jackson        26      92.86%   1        50.00%
Andrew Morton       2       7.14%    1        50.00%
Total               28      100.00%  2        100.00%


static inline void check_highest_zone(int k) { }

Contributors

Person              Tokens  Prop     Commits  CommitProp
Christoph Lameter   9       100.00%  1        100.00%
Total               9       100.00%  1        100.00%

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol) { return 1; /* error */ }

Contributors

Person              Tokens  Prop     Commits  CommitProp
Lee Schermerhorn    21      100.00%  2        100.00%
Total               21      100.00%  2        100.00%

#endif
static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long address) { return -1; /* no node preference */ }

Contributors

Person              Tokens  Prop     Commits  CommitProp
Lee Schermerhorn    26      100.00%  1        100.00%
Total               26      100.00%  1        100.00%


static inline void mpol_put_task_policy(struct task_struct *task) { }

Contributors

Person              Tokens  Prop     Commits  CommitProp
David Rientjes      11      100.00%  1        100.00%
Total               11      100.00%  1        100.00%

#endif /* CONFIG_NUMA */
#endif

Overall Contributors

Person              Tokens  Prop     Commits  CommitProp
Andrew Morton       351     32.74%   2        3.85%
Lee Schermerhorn    219     20.43%   9        17.31%
Paul Jackson        90      8.40%    5        9.62%
Christoph Lameter   88      8.21%    5        9.62%
David Rientjes      56      5.22%    7        13.46%
Oleg Nesterov       54      5.04%    2        3.85%
Gerald Schaefer     54      5.04%    1        1.92%
Mel Gorman          41      3.82%    5        9.62%
Andi Kleen          37      3.45%    3        5.77%
Hugh Dickins        21      1.96%    1        1.92%
Naoya Horiguchi     15      1.40%    1        1.92%
Dan J Williams      14      1.31%    1        1.92%
Stephen Wilson      7       0.65%    2        3.85%
Yaowei Bai          5       0.47%    1        1.92%
Motohiro Kosaki     5       0.47%    1        1.92%
David Howells       4       0.37%    1        1.92%
Vlastimil Babka     4       0.37%    1        1.92%
Robin Holt          3       0.28%    1        1.92%
Ralf Bächle         2       0.19%    1        1.92%
Nathan Zimmer       1       0.09%    1        1.92%
Greg Kroah-Hartman  1       0.09%    1        1.92%
Total               1072    100.00%  52       100.00%
Created with cregit.