cregit-Linux: how code gets into the kernel

Release 4.7: mm/hugetlb.c

Directory: mm
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/page-isolation.h>
#include <linux/jhash.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include "internal.h"


int hugepages_treat_as_movable;


int hugetlb_max_hstate __read_mostly;

unsigned int default_hstate_idx;

struct hstate hstates[HUGE_MAX_HSTATE];
/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */

static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */

static struct hstate * __initdata parsed_hstate;

static unsigned long __initdata default_hstate_max_huge_pages;

static unsigned long __initdata default_hstate_size;

static bool __initdata parsed_valid_hugepagesz = true;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */

DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */

static int num_fault_mutexes;

struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);


static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
        bool free = (spool->count == 0) && (spool->used_hpages == 0);

        spin_unlock(&spool->lock);

        /* If no pages are used, and no other handles to the subpool
         * remain, give up any reservations based on minimum size and
         * free the subpool */
        if (free) {
                if (spool->min_hpages != -1)
                        hugetlb_acct_memory(spool->hstate,
                                                -spool->min_hpages);
                kfree(spool);
        }
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
david gibson           48       66.67%    1         50.00%
mike kravetz           24       33.33%    1         50.00%
Total                  72       100.00%   2         100.00%


struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                                long min_hpages)
{
        struct hugepage_subpool *spool;

        spool = kzalloc(sizeof(*spool), GFP_KERNEL);
        if (!spool)
                return NULL;

        spin_lock_init(&spool->lock);
        spool->count = 1;
        spool->max_hpages = max_hpages;
        spool->hstate = h;
        spool->min_hpages = min_hpages;

        if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
                kfree(spool);
                return NULL;
        }
        spool->rsv_hpages = min_hpages;

        return spool;
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
david gibson           56       51.38%    1         33.33%
mike kravetz           53       48.62%    2         66.67%
Total                  109      100.00%   3         100.00%
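
The two routines above and below form the subpool lifecycle: a filesystem takes one reference when it creates the subpool and drops it when it is torn down. A minimal sketch of that pairing follows, assuming hypothetical example_mount()/example_unmount() helpers standing in for the hugetlbfs superblock code; it is illustrative only, not the actual caller.

/* Illustrative only: example_mount()/example_unmount() are hypothetical
 * stand-ins for the hugetlbfs mount path that owns the subpool. */
static struct hugepage_subpool *example_mount(struct hstate *h,
                                              long max_hpages, long min_hpages)
{
        /* Charges min_hpages against the global pool up front; returns
         * NULL (and the mount fails) if that reservation is impossible. */
        return hugepage_new_subpool(h, max_hpages, min_hpages);
}

static void example_unmount(struct hugepage_subpool *spool)
{
        /* Drops the mount's reference; once no pages remain in use the
         * subpool is freed and its minimum-size reservation returned. */
        if (spool)
                hugepage_put_subpool(spool);
}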


void hugepage_put_subpool(struct hugepage_subpool *spool)
{
        spin_lock(&spool->lock);
        BUG_ON(!spool->count);
        spool->count--;
        unlock_or_release_subpool(spool);
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
david gibson           36       100.00%   1         100.00%
Total                  36       100.00%   1         100.00%

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
                                      long delta)
{
        long ret = delta;

        if (!spool)
                return ret;

        spin_lock(&spool->lock);

        if (spool->max_hpages != -1) {          /* maximum size accounting */
                if ((spool->used_hpages + delta) <= spool->max_hpages)
                        spool->used_hpages += delta;
                else {
                        ret = -ENOMEM;
                        goto unlock_ret;
                }
        }

        /* minimum size accounting */
        if (spool->min_hpages != -1 && spool->rsv_hpages) {
                if (delta > spool->rsv_hpages) {
                        /*
                         * Asking for more reserves than those already taken on
                         * behalf of subpool.  Return difference.
                         */
                        ret = delta - spool->rsv_hpages;
                        spool->rsv_hpages = 0;
                } else {
                        ret = 0;        /* reserves already accounted for */
                        spool->rsv_hpages -= delta;
                }
        }

unlock_ret:
        spin_unlock(&spool->lock);
        return ret;
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
mike kravetz           76       52.05%    2         66.67%
david gibson           70       47.95%    1         33.33%
Total                  146      100.00%   3         100.00%

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
                                       long delta)
{
        long ret = delta;

        if (!spool)
                return delta;

        spin_lock(&spool->lock);

        if (spool->max_hpages != -1)            /* maximum size accounting */
                spool->used_hpages -= delta;

        /* minimum size accounting */
        if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
                if (spool->rsv_hpages + delta <= spool->min_hpages)
                        ret = 0;
                else
                        ret = spool->rsv_hpages + delta - spool->min_hpages;

                spool->rsv_hpages += delta;
                if (spool->rsv_hpages > spool->min_hpages)
                        spool->rsv_hpages = spool->min_hpages;
        }

        /*
         * If hugetlbfs_put_super couldn't free spool due to an outstanding
         * quota reference, free it now.
         */
        unlock_or_release_subpool(spool);

        return ret;
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
mike kravetz           96       72.18%    2         66.67%
david gibson           37       27.82%    1         33.33%
Total                  133      100.00%   3         100.00%
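
Taken together, hugepage_subpool_get_pages() and hugepage_subpool_put_pages() enforce both limits: max_hpages caps what the subpool may use, while min_hpages keeps a reserve charged against the global pool. As a worked example with hypothetical numbers: for a subpool created with max_hpages = 10 and min_hpages = 4 (so used_hpages starts at 0 and rsv_hpages at 4), hugepage_subpool_get_pages(spool, 2) raises used_hpages to 2 and, since both pages fit within the reserve, lowers rsv_hpages to 2 and returns 0, so the global pools need no adjustment. A later hugepage_subpool_put_pages(spool, 2) drops used_hpages back to 0, which is below min_hpages, so the pages go back into the reserve (rsv_hpages returns to 4) and the routine again returns 0, meaning no global reservations are dropped.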


static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
        return HUGETLBFS_SB(inode->i_sb)->spool;
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
david gibson           24       100.00%   1         100.00%
Total                  24       100.00%   1         100.00%


static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
        return subpool_inode(file_inode(vma->vm_file));
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
david gibson           22       88.00%    1         50.00%
al viro                3        12.00%    1         50.00%
Total                  25       100.00%   2         100.00%

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.  This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
};

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  In the normal case, existing regions will be expanded
 * to accommodate the specified range.  Sufficient regions should
 * exist for expansion due to the previous call to region_chg
 * with the same range.  However, it is possible that region_del
 * could have been called after region_chg and modified the map
 * in such a way that no region exists to be expanded.  In this
 * case, pull a region descriptor from the cache associated with
 * the map and use that for the new range.
 *
 * Return the number of new huge pages added to the map.  This
 * number is greater than or equal to zero.
 */
static long region_add(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg, *nrg, *trg;
        long add = 0;

        spin_lock(&resv->lock);
        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /*
         * If no region exists which can be expanded to include the
         * specified range, the list must have been modified by an
         * interleaving call to region_del().  Pull a region descriptor
         * from the cache and use it for this range.
         */
        if (&rg->link == head || t < rg->from) {
                VM_BUG_ON(resv->region_cache_count <= 0);

                resv->region_cache_count--;
                nrg = list_first_entry(&resv->region_cache, struct file_region,
                                        link);
                list_del(&nrg->link);

                nrg->from = f;
                nrg->to = t;
                list_add(&nrg->link, rg->link.prev);

                add += t - f;
                goto out_locked;
        }

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;

        /* Check for and consume any regions we now overlap with. */
        nrg = rg;
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        break;

                /* If this area reaches higher than us then extend our area to
                 * include it completely.  If this is not the first area
                 * which we intend to reuse, free it. */
                if (rg->to > t)
                        t = rg->to;
                if (rg != nrg) {
                        /* Decrement return value by the deleted range.
                         * Another range will span this area so that by
                         * end of routine add will be >= zero
                         */
                        add -= (rg->to - rg->from);
                        list_del(&rg->link);
                        kfree(rg);
                }
        }

        add += (nrg->from - f);         /* Added to beginning of region */
        nrg->from = f;
        add += t - nrg->to;             /* Added to end of region */
        nrg->to = t;

out_locked:
        resv->adds_in_progress--;
        spin_unlock(&resv->lock);
        VM_BUG_ON(add < 0);
        return add;
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
andy whitcroft         145      45.89%    2         33.33%
mike kravetz           143      45.25%    2         33.33%
davidlohr bueso        16       5.06%     1         16.67%
joonsoo kim            12       3.80%     1         16.67%
Total                  316      100.00%   6         100.00%

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  However, if the existing regions in the map can not
 * be expanded to represent the new range, a new file_region
 * structure is added to the map as a placeholder.  This is
 * so that the subsequent region_add call will have all the
 * regions it needs and will not fail.
 *
 * Upon entry, region_chg will also examine the cache of region descriptors
 * associated with the map.  If there are not enough descriptors cached, one
 * will be allocated for the in progress add operation.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater than or
 * equal to zero.  -ENOMEM is returned if a new file_region structure or
 * cache entry is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg, *nrg = NULL;
        long chg = 0;

retry:
        spin_lock(&resv->lock);
retry_locked:
        resv->adds_in_progress++;

        /*
         * Check for sufficient descriptors in the cache to accommodate
         * the number of in progress add operations.
         */
        if (resv->adds_in_progress > resv->region_cache_count) {
                struct file_region *trg;

                VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
                /* Must drop lock to allocate a new descriptor. */
                resv->adds_in_progress--;
                spin_unlock(&resv->lock);

                trg = kmalloc(sizeof(*trg), GFP_KERNEL);
                if (!trg) {
                        kfree(nrg);
                        return -ENOMEM;
                }

                spin_lock(&resv->lock);
                list_add(&trg->link, &resv->region_cache);
                resv->region_cache_count++;
                goto retry_locked;
        }

        /* Locate the region we are before or in. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* If we are below the current region then a new region is required.
         * Subtle, allocate a new region at the position but make it zero
         * size such that we can guarantee to record the reservation. */
        if (&rg->link == head || t < rg->from) {
                if (!nrg) {
                        resv->adds_in_progress--;
                        spin_unlock(&resv->lock);
                        nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                        if (!nrg)
                                return -ENOMEM;

                        nrg->from = f;
                        nrg->to   = f;
                        INIT_LIST_HEAD(&nrg->link);
                        goto retry;
                }

                list_add(&nrg->link, rg->link.prev);
                chg = t - f;
                goto out_nrg;
        }

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;
        chg = t - f;

        /* Check for and consume any regions we now overlap with. */
        list_for_each_entry(rg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        goto out;

                /* We overlap with this area, if it extends further than
                 * us then we must extend ourselves.  Account for its
                 * existing reservation. */
                if (rg->to > t) {
                        chg += rg->to - t;
                        t = rg->to;
                }
                chg -= rg->to - rg->from;
        }

out:
        spin_unlock(&resv->lock);
        /*  We already know we raced and no longer need the new region */
        kfree(nrg);
        return chg;
out_nrg:
        spin_unlock(&resv->lock);
        return chg;
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
andy whitcroft         210      51.72%    2         28.57%
mike kravetz           115      28.33%    2         28.57%
davidlohr bueso        68       16.75%    1         14.29%
joonsoo kim            12       2.96%     1         14.29%
lucas de marchi        1        0.25%     1         14.29%
Total                  406      100.00%   7         100.00%

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t)
{
        spin_lock(&resv->lock);
        VM_BUG_ON(!resv->region_cache_count);
        resv->adds_in_progress--;
        spin_unlock(&resv->lock);
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
mike kravetz           46       100.00%   1         100.00%
Total                  46       100.00%   1         100.00%
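
Seen from a caller, region_chg(), region_add() and region_abort() form a two-phase protocol. The sketch below is illustrative only: example_reserve() is a hypothetical caller, and its use of hugetlb_acct_memory() as the intermediate step that may fail is an assumption made for the example, not the exact logic of the real callers in this file.

/* Illustrative only: example_reserve() is a hypothetical caller showing
 * the region_chg()/region_add()/region_abort() calling convention. */
static long example_reserve(struct resv_map *resv, struct hstate *h,
                            long from, long to)
{
        long chg;

        /* Phase 1: how many pages in [from, to) still need reserves?
         * This also caches a region descriptor for the later add. */
        chg = region_chg(resv, from, to);
        if (chg < 0)
                return chg;                     /* -ENOMEM */

        /* Intermediate step: try to back the new reserves globally. */
        if (hugetlb_acct_memory(h, chg) < 0) {
                /* Undo the in-progress count taken by region_chg(). */
                region_abort(resv, from, to);
                return -ENOMEM;
        }

        /* Phase 2: commit the range.  This cannot fail, because
         * region_chg() guaranteed a cached descriptor is available. */
        return region_add(resv, from, to);
}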

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg, *trg;
        struct file_region *nrg = NULL;
        long del = 0;

retry:
        spin_lock(&resv->lock);
        list_for_each_entry_safe(rg, trg, head, link) {
                /*
                 * Skip regions before the range to be deleted.  file_region
                 * ranges are normally of the form [from, to).  However, there
                 * may be a "placeholder" entry in the map which is of the form
                 * (from, to) with from == to.  Check for placeholder entries
                 * at the beginning of the range to be deleted.
                 */
                if (rg->to <= f && (rg->to != rg->from || rg->to != f))
                        continue;

                if (rg->from >= t)
                        break;

                if (f > rg->from && t < rg->to) { /* Must split region */
                        /*
                         * Check for an entry in the cache before dropping
                         * lock and attempting allocation.
                         */
                        if (!nrg &&
                            resv->region_cache_count > resv->adds_in_progress) {
                                nrg = list_first_entry(&resv->region_cache,
                                                        struct file_region,
                                                        link);
                                list_del(&nrg->link);
                                resv->region_cache_count--;
                        }

                        if (!nrg) {
                                spin_unlock(&resv->lock);
                                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                                if (!nrg)
                                        return -ENOMEM;
                                goto retry;
                        }

                        del += t - f;

                        /* New entry for end of split region */
                        nrg->from = t;
                        nrg->to = rg->to;
                        INIT_LIST_HEAD(&nrg->link);

                        /* Original entry is trimmed */
                        rg->to = f;

                        list_add(&nrg->link, &rg->link);
                        nrg = NULL;
                        break;
                }

                if (f <= rg->from && t >= rg->to) { /* Remove entire region */
                        del += rg->to - rg->from;
                        list_del(&rg->link);
                        kfree(rg);
                        continue;
                }

                if (f <= rg->from) {    /* Trim beginning of region */
                        del += t - rg->from;
                        rg->from = t;
                } else {                /* Trim end of region */
                        del += rg->to - f;
                        rg->to = f;
                }
        }

        spin_unlock(&resv->lock);
        kfree(nrg);
        return del;
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
mike kravetz           234      65.18%    2         25.00%
andy whitcroft         79       22.01%    2         25.00%
mel gorman             18       5.01%     2         25.00%
davidlohr bueso        16       4.46%     1         12.50%
joonsoo kim            12       3.34%     1         12.50%
Total                  359      100.00%   8         100.00%

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
{
        struct hugepage_subpool *spool = subpool_inode(inode);
        long rsv_adjust;

        rsv_adjust = hugepage_subpool_get_pages(spool, 1);
        if (restore_reserve && rsv_adjust) {
                struct hstate *h = hstate_inode(inode);

                hugetlb_acct_memory(h, 1);
        }
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
mike kravetz           60       100.00%   1         100.00%
Total                  60       100.00%   1         100.00%

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg;
        long chg = 0;

        spin_lock(&resv->lock);
        /* Locate each segment we overlap with, and count that overlap. */
        list_for_each_entry(rg, head, link) {
                long seg_from;
                long seg_to;

                if (rg->to <= f)
                        continue;
                if (rg->from >= t)
                        break;

                seg_from = max(rg->from, f);
                seg_to = min(rg->to, t);

                chg += seg_to - seg_from;
        }
        spin_unlock(&resv->lock);

        return chg;
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
andy whitcroft         89       74.79%    1         25.00%
davidlohr bueso        16       13.45%    1         25.00%
joonsoo kim            12       10.08%    1         25.00%
wang sheng-hui         2        1.68%     1         25.00%
Total                  119      100.00%   4         100.00%

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address)
{
        return ((address - vma->vm_start) >> huge_page_shift(h)) +
                        (vma->vm_pgoff >> huge_page_order(h));
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
mel gorman             17       36.17%    2         40.00%
andy whitcroft         16       34.04%    1         20.00%
andi kleen             13       27.66%    1         20.00%
johannes weiner        1        2.13%     1         20.00%
Total                  47       100.00%   5         100.00%
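
As a worked example with hypothetical values: with 2 MB huge pages on a 4 KB base-page architecture (huge_page_shift(h) == 21, huge_page_order(h) == 9), a VMA with vm_start == 0x40000000 and vm_pgoff == 512 (a 2 MB offset into the file) that faults at address 0x40400000 yields ((0x40400000 - 0x40000000) >> 21) + (512 >> 9) = 2 + 1 = 3, i.e. huge-page index 3 in the backing file.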


pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address)
{
        return vma_hugecache_offset(hstate_vma(vma), vma, address);
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
naoya horiguchi        27       100.00%   1         100.00%
Total                  27       100.00%   1         100.00%

EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA.  In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        struct hstate *hstate;

        if (!is_vm_hugetlb_page(vma))
                return PAGE_SIZE;

        hstate = hstate_vma(vma);

        return 1UL << huge_page_shift(hstate);
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
mel gorman             39       92.86%    1         50.00%
wanpeng li             3        7.14%     1         50.00%
Total                  42       100.00%   2         100.00%

EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA.  In the majority
 * of cases, the page size used by the kernel matches the MMU size.  On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return vma_kernel_pagesize(vma);
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
mel gorman             17       100.00%   1         100.00%
Total                  17       100.00%   1         100.00%

#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping.  Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held.  It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
        return (unsigned long)vma->vm_private_data;
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
mel gorman             15       71.43%    2         66.67%
andy whitcroft         6        28.57%    1         33.33%
Total                  21       100.00%   3         100.00%


static void set_vma_private_data(struct vm_area_struct *vma,
                                                        unsigned long value)
{
        vma->vm_private_data = (void *)value;
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
andy whitcroft         13       52.00%    1         50.00%
mel gorman             12       48.00%    1         50.00%
Total                  25       100.00%   2         100.00%


struct resv_map *resv_map_alloc(void)
{
        struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
        struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

        if (!resv_map || !rg) {
                kfree(resv_map);
                kfree(rg);
                return NULL;
        }

        kref_init(&resv_map->refs);
        spin_lock_init(&resv_map->lock);
        INIT_LIST_HEAD(&resv_map->regions);

        resv_map->adds_in_progress = 0;

        INIT_LIST_HEAD(&resv_map->region_cache);
        list_add(&rg->link, &resv_map->region_cache);
        resv_map->region_cache_count = 1;

        return resv_map;
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
mike kravetz           64       51.61%    1         33.33%
andy whitcroft         52       41.94%    1         33.33%
davidlohr bueso        8        6.45%     1         33.33%
Total                  124      100.00%   3         100.00%


void resv_map_release(struct kref *ref)
{
        struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
        struct list_head *head = &resv_map->region_cache;
        struct file_region *rg, *trg;

        /* Clear out any active regions before we release the map. */
        region_del(resv_map, 0, LONG_MAX);

        /* ... and any entries left in the cache */
        list_for_each_entry_safe(rg, trg, head, link) {
                list_del(&rg->link);
                kfree(rg);
        }

        VM_BUG_ON(resv_map->adds_in_progress);

        kfree(resv_map);
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
mike kravetz           54       59.34%    2         66.67%
andy whitcroft         37       40.66%    1         33.33%
Total                  91       100.00%   3         100.00%


static inline struct resv_map *inode_resv_map(struct inode *inode)
{
        return inode->i_mapping->private_data;
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
joonsoo kim            21       100.00%   1         100.00%
Total                  21       100.00%   1         100.00%


static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        if (vma->vm_flags & VM_MAYSHARE) {
                struct address_space *mapping = vma->vm_file->f_mapping;
                struct inode *inode = mapping->host;

                return inode_resv_map(inode);

        } else {
                return (struct resv_map *)(get_vma_private_data(vma) &
                                                        ~HPAGE_RESV_MASK);
        }
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
joonsoo kim            31       39.24%    1         16.67%
mel gorman             29       36.71%    2         33.33%
andy whitcroft         16       20.25%    2         33.33%
sasha levin            3        3.80%     1         16.67%
Total                  79       100.00%   6         100.00%


static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

        set_vma_private_data(vma, (get_vma_private_data(vma) &
                                HPAGE_RESV_MASK) | (unsigned long)map);
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
andy whitcroft         36       62.07%    2         40.00%
david gibson           15       25.86%    1         20.00%
sasha levin            6        10.34%    1         20.00%
mel gorman             1        1.72%     1         20.00%
Total                  58       100.00%   5         100.00%


static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

        set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
andy whitcroft         22       44.90%    1         14.29%
david gibson           12       24.49%    1         14.29%
sasha levin            6        12.24%    1         14.29%
andrew morton          5        10.20%    1         14.29%
kenneth w. chen        2        4.08%     1         14.29%
atsushi nemoto         1        2.04%     1         14.29%
mel gorman             1        2.04%     1         14.29%
Total                  49       100.00%   7         100.00%


static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

        return (get_vma_private_data(vma) & flag) != 0;
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
andy whitcroft         21       55.26%    1         25.00%
nishanth aravamudan    13       34.21%    1         25.00%
sasha levin            3        7.89%     1         25.00%
andrew morton          1        2.63%     1         25.00%
Total                  38       100.00%   4         100.00%
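
Because a resv_map is at least pointer-aligned, the two low bits of the vm_private_data word are free to hold the HPAGE_RESV_* flags alongside the map pointer, which is what the helpers above rely on. A small sketch follows, assuming a private (non-VM_MAYSHARE) hugetlb VMA and a hypothetical example_private_reservation() helper; it only shows the two pieces of state coexisting.

/* Illustrative only: example_private_reservation() is a hypothetical
 * helper; the VMA is assumed to be a MAP_PRIVATE hugetlb mapping. */
static void example_private_reservation(struct vm_area_struct *vma,
                                        struct resv_map *resv_map)
{
        /* Store the map pointer, preserving any flag bits already set. */
        set_vma_resv_map(vma, resv_map);

        /* Mark this VMA as the owner of the private reservation. */
        set_vma_resv_flags(vma, HPAGE_RESV_OWNER);

        /* Both pieces of state can be read back independently. */
        WARN_ON(vma_resv_map(vma) != resv_map);
        WARN_ON(!is_vma_resv_set(vma, HPAGE_RESV_OWNER));
}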

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        if (!(vma->vm_flags & VM_MAYSHARE))
                vma->vm_private_data = (void *)0;
}

Contributors

Person                 Tokens   Prop      Commits   CommitProp
andy whitcroft         23       54.76%    1         14.29%
andrew morton          7        16.67%    1         14.29%
mel gorman             5        11.90%    2         28.57%
christoph lameter      4        9.52%     2         28.57%
sasha levin            3        7.14%     1         14.29%
Total                  42       100.00%   7         100.00%

/* Returns true if the VMA has associated reserve pages */
static bool