Release 4.17 fs/hugetlbfs/inode.c

Directory: fs/hugetlbfs
/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>		/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>


static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;


struct hugetlbfs_config {
	struct hstate		*hstate;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};


int sysctl_hugetlb_shm_group;


enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize, Opt_min_size,
	Opt_err,
};


static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_min_size,	"min_size=%s"},
	{Opt_err,	NULL},
};

#ifdef CONFIG_NUMA

static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Mike Kravetz                37      100.00%  1        100.00%
Total                       37      100.00%  1        100.00%


static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Mike Kravetz                19      100.00%  1        100.00%
Total                       19      100.00%  1        100.00%

#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Mike Kravetz                19      100.00%  1        100.00%
Total                       19      100.00%  1        100.00%


static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Mike Kravetz                11      100.00%  1        100.00%
Total                       11      100.00%  1        100.00%

#endif
static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Adam Litke                  45      100.00%  1        100.00%
Total                       45      100.00%  1        100.00%

/*
 * Mask used when checking the page offset value passed in via system
 * calls. This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value. The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
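
To make the mask concrete, here is a small stand-alone sketch (not from the kernel) that evaluates PGOFF_LOFFT_MAX for an assumed 64-bit build with 4 KiB base pages, and shows a vm_pgoff whose byte offset would flip the sign bit of a 64-bit loff_t:

	#include <stdio.h>

	#define BITS_PER_LONG	64	/* assumed 64-bit build */
	#define PAGE_SHIFT	12	/* assumed 4 KiB base pages */
	#define PGOFF_LOFFT_MAX \
		(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))

	int main(void)
	{
		unsigned long ok  = (1UL << 51) - 1; /* byte offset 2^63 - 4096: still positive */
		unsigned long bad = 1UL << 51;       /* byte offset 2^63: sign bit set */

		printf("mask      = %#lx\n", PGOFF_LOFFT_MAX);
		printf("ok  pgoff = %s\n", (ok  & PGOFF_LOFFT_MAX) ? "rejected" : "accepted");
		printf("bad pgoff = %s\n", (bad & PGOFF_LOFFT_MAX) ? "rejected" : "accepted");
		return 0;
	}
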
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range. If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset. This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long). So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               82      35.34%   4        18.18%
Mike Kravetz                59      25.43%   3        13.64%
David Gibson                25      10.78%   2        9.09%
Andi Kleen                  22      9.48%    1        4.55%
Hugh Dickins                9       3.88%    1        4.55%
Oleg Nesterov               8       3.45%    2        9.09%
Kenneth W. Chen             7       3.02%    1        4.55%
Yanmin Zhang                6       2.59%    1        4.55%
Mel Gorman                  6       2.59%    2        9.09%
Al Viro                     4       1.72%    2        9.09%
Adam Litke                  2       0.86%    1        4.55%
Konstantin Khlebnikov       1       0.43%    1        4.55%
Becky Bruce                 1       0.43%    1        4.55%
Total                       232     100.00%  22       100.00%
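
For reference, a minimal user-space sketch of the alignment constraints enforced above; the mount point /mnt/huge and the 2 MiB huge page size are assumptions:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define HPAGE_SIZE	(2UL << 20)	/* assumed 2 MiB huge pages */

	int main(void)
	{
		/* Assumes hugetlbfs is mounted at /mnt/huge. */
		int fd = open("/mnt/huge/mmap-demo", O_CREAT | O_RDWR, 0600);
		if (fd < 0) { perror("open"); return 1; }

		/* Length and file offset must be multiples of the huge page
		 * size; hugetlbfs_file_mmap() rejects unaligned offsets with
		 * -EINVAL, and grows i_size for writable mappings. */
		char *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 0);
		if (p == MAP_FAILED) { perror("mmap"); return 1; }

		memset(p, 0x5a, HPAGE_SIZE);	/* faults in one huge page */

		munmap(p, HPAGE_SIZE);
		close(fd);
		return 0;
	}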

/*
 * Called under down_write(mmap_sem).
 */
#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               111     53.62%   1        12.50%
Michel Lespinasse           35      16.91%   1        12.50%
Benjamin Herrenschmidt      24      11.59%   1        12.50%
Andi Kleen                  23      11.11%   1        12.50%
Xiao Guangrong              7       3.38%    1        12.50%
Hugh Dickins                3       1.45%    1        12.50%
Wolfgang Wander             3       1.45%    1        12.50%
Yanmin Zhang                1       0.48%    1        12.50%
Total                       207     100.00%  8        100.00%

#endif
static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset with in that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Badari Pulavarty            100     81.30%   1        33.33%
Al Viro                     20      16.26%   1        33.33%
Kirill A. Shutemov          3       2.44%    1        33.33%
Total                       123     100.00%  3        100.00%

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. Its *very* similar to do_generic_mapping_read(), we can't use that
 * since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Badari Pulavarty            179     56.11%   1        16.67%
Al Viro                     65      20.38%   1        16.67%
Andi Kleen                  37      11.60%   1        16.67%
Aneesh Kumar K.V            35      10.97%   1        16.67%
Roel Kluin                  2       0.63%    1        16.67%
Kirill A. Shutemov          1       0.31%    1        16.67%
Total                       319     100.00%  6        100.00%


static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               21      52.50%   1        50.00%
Nicholas Piggin             19      47.50%   1        50.00%
Total                       40      100.00%  2        100.00%


static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               21      51.22%   1        50.00%
Nicholas Piggin             20      48.78%   1        50.00%
Total                       41      100.00%  2        100.00%
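
The pairing above gives hugetlbfs its characteristic read/write asymmetry: read(2) is served by hugetlbfs_read_iter(), while buffered write(2) has no backing implementation, so stores must go through a mapping. A small user-space sketch (the file path is assumed to exist on a hugetlbfs mount):

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		int fd = open("/mnt/huge/mmap-demo", O_RDWR); /* assumed file */
		if (fd < 0) { perror("open"); return 1; }

		/* Reads work, including across holes (zero-filled). */
		printf("pread: %zd\n", pread(fd, buf, sizeof(buf), 0));

		/* No ->write_iter is registered, so write(2) should fail. */
		if (write(fd, buf, sizeof(buf)) < 0)
			printf("write: %s (expected)\n", strerror(errno));

		close(fd);
		return 0;
	}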


static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               22      84.62%   1        20.00%
Mike Kravetz                1       3.85%    1        20.00%
MinChan Kim                 1       3.85%    1        20.00%
Konstantin Khlebnikov       1       3.85%    1        20.00%
Adrian Bunk                 1       3.85%    1        20.00%
Total                       26      100.00%  5        100.00%


static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond the 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Mike Kravetz                129     99.23%   1        50.00%
Davidlohr Bueso A           1       0.77%    1        50.00%
Total                       130     100.00%  2        100.00%

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch. There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts. Page faults can not race with truncation
 *	in this routine. hugetlb_no_page() prevents page faults in the
 *	truncated range. It checks i_size before allocation, and again after
 *	with the page table lock for the page held. The same lock must be
 *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted. The region/reserv map for ranges without associated
 *	pages are not modified. Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(h, current->mm,
							&pseudo_vma,
							mapping, index, 0);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller. Unmap (again) now after taking
			 * the fault mutex. The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				i_mmap_lock_write(mapping);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages). In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail. Correspondingly,
			 * the subpool and global reserve usage count can need
			 * to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Mike Kravetz                209     56.79%   3        25.00%
Andrew Morton               102     27.72%   2        16.67%
Andi Kleen                  14      3.80%    1        8.33%
Kenneth W. Chen             13      3.53%    1        8.33%
Jan Kara                    13      3.53%    2        16.67%
David Gibson                12      3.26%    1        8.33%
zhong jiang                 4       1.09%    1        8.33%
Adrian Bunk                 1       0.27%    1        8.33%
Total                       368     100.00%  12       100.00%


static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* root inode doesn't have the resv_map, so we should check it */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
JoonSoo Kim                 31      55.36%   1        16.67%
Andrew Morton               13      23.21%   1        16.67%
Christoph Hellwig           7       12.50%   1        16.67%
Mike Kravetz                3       5.36%    1        16.67%
Al Viro                     1       1.79%    1        16.67%
Jan Kara                    1       1.79%    1        16.67%
Total                       56      100.00%  6        100.00%


static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               75      70.09%   2        18.18%
Andi Kleen                  14      13.08%   1        9.09%
Mike Kravetz                6       5.61%    2        18.18%
Davidlohr Bueso A           4       3.74%    2        18.18%
Ken Chen                    4       3.74%    1        9.09%
Hugh Dickins                2       1.87%    1        9.09%
Michel Lespinasse           1       0.93%    1        9.09%
David Gibson                1       0.93%    1        9.09%
Total                       107     100.00%  11       100.00%


static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_mutex */
		if (info->seals & F_SEAL_WRITE) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Mike Kravetz                81      47.93%   1        12.50%
Andrew Morton               38      22.49%   2        25.00%
Marc-André Lureau           30      17.75%   1        12.50%
Andi Kleen                  11      6.51%    1        12.50%
Christoph Hellwig           5       2.96%    1        12.50%
Al Viro                     2       1.18%    1        12.50%
Davidlohr Bueso A           2       1.18%    1        12.50%
Total                       169     100.00%  8        100.00%
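
From user space this path is reached via fallocate(2); a minimal sketch, assuming a hugetlbfs file under /mnt/huge with 2 MiB pages. Note the VFS requires FALLOC_FL_PUNCH_HOLE to be combined with FALLOC_FL_KEEP_SIZE:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/mnt/huge/punch-demo", O_CREAT | O_RDWR, 0600);
		if (fd < 0) { perror("open"); return 1; }

		/* Preallocate four 2 MiB pages... */
		if (fallocate(fd, 0, 0, 8 << 20))
			perror("fallocate");

		/* ...then free the second one. Unaligned offsets are rounded
		 * inward by hugetlbfs_punch_hole(). */
		if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			      2 << 20, 2 << 20))
			perror("punch hole");

		close(fd);
		return 0;
	}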


static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines. If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
						index, addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * unlock_page because locked by add_to_page_cache()
		 * page_put due to reference from alloc_huge_page()
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Mike Kravetz                489     91.06%   1        16.67%
Marc-André Lureau           38      7.08%    1        16.67%
Deepa Dinamani              4       0.74%    1        16.67%
Nadav Amit                  3       0.56%    1        16.67%
Al Viro                     2       0.37%    1        16.67%
Andries E. Brouwer          1       0.19%    1        16.67%
Total                       537     100.00%  6        100.00%


static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	BUG_ON(!inode);

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		error = hugetlb_vmtruncate(inode, newsize);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Mike Kravetz                119     65.75%   1        33.33%
Marc-André Lureau           60      33.15%   1        33.33%
Jan Kara                    2       1.10%    1        33.33%
Total                       181     100.00%  3        100.00%
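
The seal checks above can be exercised via memfd_create(2) with MFD_HUGETLB (sealing of hugetlb memfds is supported since Linux 4.16). A sketch, assuming a glibc that exposes memfd_create and at least one free 2 MiB huge page:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = memfd_create("sealed-huge", MFD_HUGETLB | MFD_ALLOW_SEALING);
		if (fd < 0) { perror("memfd_create"); return 1; }

		if (ftruncate(fd, 2 << 20)) { perror("ftruncate"); return 1; }

		if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW)) { perror("F_ADD_SEALS"); return 1; }

		/* hugetlbfs_setattr() now rejects any size increase with -EPERM. */
		if (ftruncate(fd, 4 << 20))
			perror("grow after F_SEAL_GROW (expected)");

		close(fd);
		return 0;
	}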


static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Al Viro                     48      42.11%   1        14.29%
Andrew Morton               45      39.47%   2        28.57%
Christoph Hellwig           7       6.14%    1        14.29%
Mike Kravetz                5       4.39%    1        14.29%
Aneesh Kumar K.V            5       4.39%    1        14.29%
Deepa Dinamani              4       3.51%    1        14.29%
Total                       114     100.00%  7        100.00%

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map;

	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else
		kref_put(&resv_map->refs, resv_map_release);

	return inode;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               94      40.00%   2        15.38%
Al Viro                     63      26.81%   3        23.08%
JoonSoo Kim                 32      13.62%   1        7.69%
Marc-André Lureau           16      6.81%    1        7.69%
Michal Hocko                11      4.68%    1        7.69%
Josh Boyer                  5       2.13%    1        7.69%
Kenneth W. Chen             5       2.13%    1        7.69%
Deepa Dinamani              4       1.70%    1        7.69%
Dave Hansen                 3       1.28%    1        7.69%
Davidlohr Bueso A           2       0.85%    1        7.69%
Total                       235     100.00%  13       100.00%

/*
 * File creation. Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               80      91.95%   4        50.00%
Deepa Dinamani              4       4.60%    1        12.50%
Al Viro                     2       2.30%    2        25.00%
Andries E. Brouwer          1       1.15%    1        12.50%
Total                       87      100.00%  8        100.00%


static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               44      91.67%   1        33.33%
Dave Hansen                 3       6.25%    1        33.33%
Al Viro                     1       2.08%    1        33.33%
Total                       48      100.00%  3        100.00%


static int hugetlbfs_create(struct inode *dir, struct dentry *dentry,
			umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               32      88.89%   1        25.00%
Al Viro                     3       8.33%    2        50.00%
Trond Myklebust             1       2.78%    1        25.00%
Total                       36      100.00%  4        100.00%


static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               112     95.73%   3        60.00%
Deepa Dinamani              4       3.42%    1        20.00%
Al Viro                     1       0.85%    1        20.00%
Total                       117     100.00%  5        100.00%

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Ken Chen                    14      48.28%   1        25.00%
Andrew Morton               13      44.83%   1        25.00%
Adrian Bunk                 1       3.45%    1        25.00%
Christoph Lameter           1       3.45%    1        25.00%
Total                       29      100.00%  4        100.00%


static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Naoya Horiguchi             51      70.83%   1        20.00%
Jérôme Glisse               14      19.44%   1        20.00%
Mel Gorman                  4       5.56%    2        40.00%
Rafael Aquini               3       4.17%    1        20.00%
Total                       72      100.00%  5        100.00%


static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Naoya Horiguchi             38      60.32%   1        50.00%
Mike Kravetz                25      39.68%   1        50.00%
Total                       63      100.00%  2        100.00%

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
David Howells               256     100.00%  1        100.00%
Total                       256     100.00%  1        100.00%


static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               91      53.85%   3        33.33%
David Gibson                48      28.40%   2        22.22%
Andi Kleen                  14      8.28%    1        11.11%
Art Haas                    8       4.73%    1        11.11%
David Howells               8       4.73%    2        22.22%
Total                       169     100.00%  9        100.00%
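
These statistics surface through statfs(2), where f_bsize reports the huge page size and the block counts reflect the subpool; a user-space sketch (the mount point is an assumption):

	#include <stdio.h>
	#include <sys/vfs.h>
	#include <linux/magic.h>	/* HUGETLBFS_MAGIC */

	int main(void)
	{
		struct statfs s;

		if (statfs("/mnt/huge", &s)) { perror("statfs"); return 1; }

		printf("hugetlbfs:      %s\n", s.f_type == HUGETLBFS_MAGIC ? "yes" : "no");
		printf("huge page size: %ld bytes\n", (long)s.f_bsize);
		printf("pool pages:     %ld total, %ld free\n",
		       (long)s.f_blocks, (long)s.f_bfree);
		return 0;
	}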


static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               38      74.51%   1        50.00%
David Gibson                13      25.49%   1        50.00%
Total                       51      100.00%  2        100.00%


static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Christoph Hellwig           69      100.00%  1        100.00%
Total                       69      100.00%  1        100.00%


static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Christoph Hellwig           42      100.00%  1        100.00%
Total                       42      100.00%  1        100.00%

static struct kmem_cache *hugetlbfs_inode_cachep;
static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode. mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode. So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode. This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               40      46.51%   1        25.00%
Christoph Hellwig           34      39.53%   1        25.00%
Mike Kravetz                11      12.79%   1        25.00%
Christoph Lameter           1       1.16%    1        25.00%
Total                       86      100.00%  4        100.00%


static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Nicholas Piggin             36      100.00%  1        100.00%
Total                       36      100.00%  1        100.00%


static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               26      61.90%   1        33.33%
Christoph Hellwig           10      23.81%   1        33.33%
Nicholas Piggin             6       14.29%   1        33.33%
Total                       42      100.00%  3        100.00%

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.set_page_dirty		= hugetlbfs_set_page_dirty,
	.migratepage		= hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};
static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig2790.00%150.00%
Christoph Lameter310.00%150.00%
Total30100.00%2100.00%

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate. Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Mike Kravetz                66      97.06%   1        50.00%
David Howells               2       2.94%    1        50.00%
Total                       68      100.00%  2        100.00%
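
A worked example of the conversion, as a stand-alone user-space mirror of the function above (the 2 MiB page shift of 21 and the 100-page pool are assumptions):

	#include <stdio.h>

	enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

	static long size_to_hpages(unsigned long long size_opt,
				   enum hugetlbfs_size_type val_type,
				   unsigned int shift, unsigned long max_huge_pages)
	{
		if (val_type == NO_SIZE)
			return -1;
		if (val_type == SIZE_PERCENT) {
			size_opt <<= shift;
			size_opt *= max_huge_pages;
			size_opt /= 100;	/* stands in for do_div() */
		}
		return size_opt >> shift;
	}

	int main(void)
	{
		/* size=1G with 2 MiB pages: 512 pages */
		printf("size=1G  -> %ld\n", size_to_hpages(1ULL << 30, SIZE_STD, 21, 100));
		/* size=50%% of a 100-page pool: 50 pages */
		printf("size=50%% -> %ld\n", size_to_hpages(50, SIZE_PERCENT, 21, 100));
		return 0;
	}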


static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long max_size_opt = 0, min_size_opt = 0;
	enum hugetlbfs_size_type max_val_type = NO_SIZE, min_val_type = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			max_size_opt = memparse(args[0].from, &rest);
			max_val_type = SIZE_STD;
			if (*rest == '%')
				max_val_type = SIZE_PERCENT;
			break;
		}

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				pr_err("Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		case Opt_min_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			min_size_opt = memparse(args[0].from, &rest);
			min_val_type = SIZE_STD;
			if (*rest == '%')
				min_val_type = SIZE_PERCENT;
			break;
		}

		default:
			pr_err("Bad mount option: \"%s\"\n", p);
			return -EINVAL;
			break;
		}
	}

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages. If NO_SIZE, -1 is returned.
	 */
	pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						max_size_opt, max_val_type);
	pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						min_size_opt, min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (max_val_type > NO_SIZE &&
	    pconfig->min_hpages > pconfig->max_hpages) {
		pr_err("minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;

bad_val:
	pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
	return -EINVAL;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Randy Dunlap                141     27.01%   1        8.33%
Andrew Morton               138     26.44%   4        33.33%
Mike Kravetz                108     20.69%   1        8.33%
Andi Kleen                  83      15.90%   1        8.33%
Eric W. Biedermann          38      7.28%    1        8.33%
Lee Schermerhorn            9       1.72%    1        8.33%
David Howells               2       0.38%    1        8.33%
Akinobu Mita                2       0.38%    1        8.33%
Ken Chen                    1       0.19%    1        8.33%
Total                       522     100.00%  12       100.00%


static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	config.max_hpages = -1; /* No limit on size by default */
	config.nr_inodes = -1; /* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	config.min_hpages = -1; /* No default minimum size */
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;
	sbinfo->uid = config.uid;
	sbinfo->gid = config.gid;
	sbinfo->mode = config.mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified. Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (config.max_hpages != -1 || config.min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(config.hstate,
							config.max_hpages,
							config.min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               191     58.05%   5        35.71%
David Gibson                34      10.33%   1        7.14%
Andi Kleen                  33      10.03%   2        14.29%
David Howells               28      8.51%    2        14.29%
Mike Kravetz                27      8.21%    1        7.14%
Al Viro                     10      3.04%    2        14.29%
William Lee Irwin III       6       1.82%    1        7.14%
Total                       329     100.00%  14       100.00%


static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               31      83.78%   1        33.33%
Al Viro                     5       13.51%   1        33.33%
Andries E. Brouwer          1       2.70%    1        33.33%
Total                       37      100.00%  3        100.00%

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               16      50.00%   1        33.33%
Eric W. Biedermann          14      43.75%   1        33.33%
Mel Gorman                  2       6.25%    1        33.33%
Total                       32      100.00%  3        100.00%


static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andi Kleen                  30      90.91%   1        50.00%
Naoya Horiguchi             3       9.09%    1        50.00%
Total                       33      100.00%  2        100.00%

static const struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};

/*
 * Note that size should be aligned to proper hugepage size in caller side,
 * otherwise hugetlb_reserve_pages reserves one less hugepages than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr quick_string;
	int hstate_idx;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	if (!hugetlbfs_vfsmount[hstate_idx])
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc_pseudo(sb, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	file = ERR_PTR(-ENOMEM);
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (IS_ERR(file))
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               133     32.28%   4        14.29%
Al Viro                     44      10.68%   4        14.29%
Andi Kleen                  35      8.50%    1        3.57%
Hugh Dickins                34      8.25%    1        3.57%
Ravikiran G. Thirumalai     27      6.55%    1        3.57%
David Gibson                21      5.10%    1        3.57%
David Rientjes              19      4.61%    1        3.57%
Anatol Pomozov              18      4.37%    1        3.57%
Dave Hansen                 14      3.40%    1        3.57%
Akinobu Mita                12      2.91%    1        3.57%
Stephen D. Smalley          12      2.91%    1        3.57%
Mel Gorman                  11      2.67%    3        10.71%
Naoya Horiguchi             9       2.18%    1        3.57%
Eric W. Biedermann          7       1.70%    1        3.57%
Rik Van Riel                5       1.21%    1        3.57%
Miklos Szeredi              3       0.73%    1        3.57%
Eric B Munson               3       0.73%    1        3.57%
Kenneth W. Chen             3       0.73%    1        3.57%
Motohiro Kosaki             1       0.24%    1        3.57%
David Howells               1       0.24%    1        3.57%
Total                       412     100.00%  28       100.00%
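
hugetlb_file_setup() is how SysV shared memory gets huge pages: shmget(2) with SHM_HUGETLB lands here with creat_flags == HUGETLB_SHMFS_INODE. A minimal user-space sketch (the 2 MiB size assumes the default huge page size; the caller needs CAP_IPC_LOCK or membership in sysctl_hugetlb_shm_group):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/ipc.h>
	#include <sys/shm.h>

	int main(void)
	{
		/* Size must be a multiple of the huge page size. */
		int id = shmget(IPC_PRIVATE, 2 << 20, IPC_CREAT | SHM_HUGETLB | 0600);
		if (id < 0) { perror("shmget"); return 1; }

		void *p = shmat(id, NULL, 0);
		if (p == (void *)-1) { perror("shmat"); return 1; }

		shmdt(p);
		shmctl(id, IPC_RMID, NULL);	/* segment goes away on last detach */
		return 0;
	}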


static int __init init_hugetlbfs_fs(void)
{
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	i = 0;
	for_each_hstate(h) {
		char buf[50];
		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
							buf);

		if (IS_ERR(hugetlbfs_vfsmount[i])) {
			pr_err("Cannot mount internal hugetlbfs for "
				"page size %uK", ps_kb);
			error = PTR_ERR(hugetlbfs_vfsmount[i]);
			hugetlbfs_vfsmount[i] = NULL;
		}
		i++;
	}
	/* Non default hstates are optional */
	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
		return 0;

out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
out2:
	return error;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andi Kleen                  99      47.14%   1        12.50%
Andrew Morton               84      40.00%   3        37.50%
Nishanth Aravamudan         16      7.62%    1        12.50%
Peter Zijlstra              7       3.33%    1        12.50%
Hillf Danton                3       1.43%    1        12.50%
Vladimir Davydov            1       0.48%    1        12.50%
Total                       210     100.00%  8        100.00%

fs_initcall(init_hugetlbfs_fs)

Overall Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               1942    30.37%   17       10.12%
Mike Kravetz                1468    22.96%   11       6.55%
Andi Kleen                  426     6.66%    4        2.38%
David Howells               313     4.90%    5        2.98%
Al Viro                     294     4.60%    17       10.12%
Badari Pulavarty            282     4.41%    1        0.60%
Randy Dunlap                210     3.28%    2        1.19%
Christoph Hellwig           204     3.19%    6        3.57%
David Gibson                165     2.58%    5        2.98%
Marc-André Lureau           144     2.25%    1        0.60%
Naoya Horiguchi             115     1.80%    3        1.79%
Nicholas Piggin             85      1.33%    2        1.19%
JoonSoo Kim                 63      0.99%    1        0.60%
Eric W. Biedermann          59      0.92%    2        1.19%
Hugh Dickins                49      0.77%    5        2.98%
Adam Litke                  47      0.74%    2        1.19%
Aneesh Kumar K.V            40      0.63%    2        1.19%
Michel Lespinasse           36      0.56%    2        1.19%
Kenneth W. Chen             28      0.44%    1        0.60%
Benjamin Herrenschmidt      27      0.42%    1        0.60%
Ravikiran G. Thirumalai     27      0.42%    1        0.60%
Mel Gorman                  23      0.36%    5        2.98%
Dave Hansen                 20      0.31%    2        1.19%
Ken Chen                    20      0.31%    3        1.79%
Deepa Dinamani              20      0.31%    1        0.60%
David Rientjes              19      0.30%    1        0.60%
Anatol Pomozov              18      0.28%    1        0.60%
Jan Kara                    16      0.25%    4        2.38%
Art Haas                    16      0.25%    1        0.60%
Nishanth Aravamudan         16      0.25%    1        0.60%
Akinobu Mita                14      0.22%    2        1.19%
Jérôme Glisse               14      0.22%    1        0.60%
Michal Hocko                14      0.22%    1        0.60%
Stephen D. Smalley          12      0.19%    1        0.60%
Davidlohr Bueso A           10      0.16%    3        1.79%
Lee Schermerhorn            9       0.14%    1        0.60%
Oleg Nesterov               8       0.13%    2        1.19%
Yanmin Zhang                7       0.11%    2        1.19%
Miklos Szeredi              7       0.11%    2        1.19%
Peter Zijlstra              7       0.11%    1        0.60%
Christoph Lameter           7       0.11%    4        2.38%
Xiao Guangrong              7       0.11%    1        0.60%
Kirill A. Shutemov          6       0.09%    3        1.79%
Arjan van de Ven            6       0.09%    2        1.19%
William Lee Irwin III       6       0.09%    1        0.60%
Arnd Bergmann               5       0.08%    1        0.60%
Adrian Bunk                 5       0.08%    2        1.19%
Rik Van Riel                5       0.08%    1        0.60%
Josh Boyer                  5       0.08%    1        0.60%
zhong jiang                 4       0.06%    1        0.60%
Rafael Aquini               3       0.05%    1        0.60%
Andries E. Brouwer          3       0.05%    2        1.19%
Nick Black                  3       0.05%    1        0.60%
Wolfgang Wander             3       0.05%    1        0.60%
Nadav Amit                  3       0.05%    1        0.60%
Eric B Munson               3       0.05%    1        0.60%
Arnaldo Carvalho de Melo    3       0.05%    1        0.60%
Hillf Danton                3       0.05%    1        0.60%
Paul Gortmaker              2       0.03%    1        0.60%
Fabian Frederick            2       0.03%    2        1.19%
Roel Kluin                  2       0.03%    1        0.60%
Greg Kroah-Hartman          2       0.03%    1        0.60%
Josef 'Jeff' Sipek          2       0.03%    1        0.60%
Konstantin Khlebnikov       2       0.03%    2        1.19%
Motohiro Kosaki             1       0.02%    1        0.60%
Ingo Molnar                 1       0.02%    1        0.60%
Steven Whitehouse           1       0.02%    1        0.60%
Trond Myklebust             1       0.02%    1        0.60%
Becky Bruce                 1       0.02%    1        0.60%
Vladimir Davydov            1       0.02%    1        0.60%
Linus Torvalds              1       0.02%    1        0.60%
MinChan Kim                 1       0.02%    1        0.60%
Alexey Dobriyan             0       0.00%    0        0.00%
Total                       6394    100.00%  168      100.00%