cregit-Linux: how code gets into the kernel

Release 4.12 include/linux/pagemap.h

Directory: include/linux
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.
 */

enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};


static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Guillaume Chazarain | 49 | 94.23% | 1 | 50.00%
Andrew Morton | 3 | 5.77% | 1 | 50.00%
Total | 52 | 100.00% | 2 | 100.00%
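
The AS_EIO/AS_ENOSPC bits recorded here are consumed later by sync paths. A minimal sketch of such a consumer, modeled loosely on filemap_check_errors() in mm/filemap.c of this era (the helper name here is hypothetical, not the verbatim kernel code):

static int report_mapping_errors(struct address_space *mapping)
{
	int ret = 0;

	/* Report and clear any error recorded by mapping_set_error(). */
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	/* ENOSPC takes precedence over generic EIO when both are set. */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	return ret;
}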


static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Lee Schermerhorn | 22 | 100.00% | 1 | 100.00%
Total | 22 | 100.00% | 1 | 100.00%


static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Lee Schermerhorn | 22 | 100.00% | 1 | 100.00%
Total | 22 | 100.00% | 1 | 100.00%


static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Lee Schermerhorn | 32 | 100.00% | 2 | 100.00%
Total | 32 | 100.00% | 2 | 100.00%


static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Johannes Weiner | 22 | 100.00% | 1 | 100.00%
Total | 22 | 100.00% | 1 | 100.00%


static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Johannes Weiner | 21 | 91.30% | 1 | 50.00%
Huang Ying | 2 | 8.70% | 1 | 50.00%
Total | 23 | 100.00% | 2 | 100.00%


static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Huang Ying | 22 | 100.00% | 1 | 100.00%
Total | 22 | 100.00% | 1 | 100.00%


static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Huang Ying | 22 | 91.67% | 1 | 50.00%
Johannes Weiner | 2 | 8.33% | 1 | 50.00%
Total | 24 | 100.00% | 2 | 100.00%


static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Andrew Morton | 15 | 88.24% | 1 | 33.33%
Al Viro | 1 | 5.88% | 1 | 33.33%
Michal Hocko | 1 | 5.88% | 1 | 33.33%
Total | 17 | 100.00% | 3 | 100.00%

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
					   gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Michal Hocko | 23 | 100.00% | 1 | 100.00%
Total | 23 | 100.00% | 1 | 100.00%

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Andrew Morton | 19 | 90.48% | 1 | 33.33%
Al Viro | 1 | 4.76% | 1 | 33.33%
Michal Hocko | 1 | 4.76% | 1 | 33.33%
Total | 21 | 100.00% | 3 | 100.00%

void release_pages(struct page **pages, int nr, bool cold);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Nicholas Piggin | 74 | 83.15% | 1 | 16.67%
Sasha Levin | 6 | 6.74% | 1 | 16.67%
Kirill A. Shutemov | 4 | 4.49% | 1 | 16.67%
Paul E. McKenney | 3 | 3.37% | 1 | 16.67%
JoonSoo Kim | 1 | 1.12% | 1 | 16.67%
Frédéric Weisbecker | 1 | 1.12% | 1 | 16.67%
Total | 89 | 100.00% | 6 | 100.00%
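
To make the three-step lookup protocol above concrete, here is a hedged sketch in the spirit of find_get_page()'s slow path. It is simplified: the real code works on radix-tree slots and handles exceptional entries, and the function name is illustrative only.

static inline struct page *lockless_lookup(struct address_space *mapping,
					   pgoff_t offset)
{
	struct page *page;

	rcu_read_lock();
repeat:
	/* 1. find page in radix tree */
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page) {
		/* 2. conditionally increment refcount */
		if (!page_cache_get_speculative(page))
			goto repeat;
		/* 3. check the page is still in pagecache (if no, goto 1) */
		if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
			put_page(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}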

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Nicholas Piggin | 94 | 85.45% | 1 | 16.67%
Sasha Levin | 6 | 5.45% | 1 | 16.67%
JoonSoo Kim | 4 | 3.64% | 1 | 16.67%
Kirill A. Shutemov | 4 | 3.64% | 1 | 16.67%
Frédéric Weisbecker | 1 | 0.91% | 1 | 16.67%
Paul E. McKenney | 1 | 0.91% | 1 | 16.67%
Total | 110 | 100.00% | 6 | 100.00%

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Nicholas Piggin | 15 | 75.00% | 1 | 50.00%
Paul Jackson | 5 | 25.00% | 1 | 50.00%
Total | 20 | 100.00% | 2 | 100.00%

#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Linus Torvalds | 19 | 82.61% | 1 | 33.33%
Andrew Morton | 3 | 13.04% | 1 | 33.33%
Nicholas Piggin | 1 | 4.35% | 1 | 33.33%
Total | 23 | 100.00% | 3 | 100.00%


static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Andrew Morton | 24 | 96.00% | 2 | 66.67%
Nicholas Piggin | 1 | 4.00% | 1 | 33.33%
Total | 25 | 100.00% | 3 | 100.00%


static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Fengguang Wu | 22 | 91.67% | 1 | 50.00%
Michal Hocko | 2 | 8.33% | 1 | 50.00%
Total | 24 | 100.00% | 2 | 100.00%

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
				int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					 pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Mel Gorman | 16 | 55.17% | 1 | 16.67%
Linus Torvalds (pre-git) | 9 | 31.03% | 2 | 33.33%
Johannes Weiner | 2 | 6.90% | 1 | 16.67%
Fengguang Wu | 1 | 3.45% | 1 | 16.67%
Andrew Morton | 1 | 3.45% | 1 | 16.67%
Total | 29 | 100.00% | 6 | 100.00%
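
A typical caller pattern looks like the hypothetical sketch below (mapping and index are assumed context). The returned page carries a reference but is not locked, so only put_page() is needed afterwards:

	struct page *page;

	page = find_get_page(mapping, index);
	if (page) {
		/* ... read from the referenced (but unlocked) page ... */
		put_page(page);
	}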


static inline struct page *find_get_page_flags(struct address_space *mapping,
					       pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Mel Gorman | 20 | 62.50% | 1 | 33.33%
Johannes Weiner | 11 | 34.38% | 1 | 33.33%
Linus Torvalds (pre-git) | 1 | 3.12% | 1 | 33.33%
Total | 32 | 100.00% | 3 | 100.00%

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					  pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Mel Gorman | 16 | 55.17% | 1 | 16.67%
Linus Torvalds (pre-git) | 10 | 34.48% | 2 | 33.33%
Johannes Weiner | 1 | 3.45% | 1 | 16.67%
Fengguang Wu | 1 | 3.45% | 1 | 16.67%
Andrew Morton | 1 | 3.45% | 1 | 16.67%
Total | 29 | 100.00% | 6 | 100.00%

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					       pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
				  FGP_LOCK|FGP_ACCESSED|FGP_CREAT, gfp_mask);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Mel Gorman | 21 | 58.33% | 1 | 25.00%
Linus Torvalds | 13 | 36.11% | 1 | 25.00%
Al Viro | 1 | 2.78% | 1 | 25.00%
Fengguang Wu | 1 | 2.78% | 1 | 25.00%
Total | 36 | 100.00% | 4 | 100.00%
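
A hedged usage sketch: the page comes back locked and referenced, so both the lock and the reference must be released (variable context and error handling are illustrative):

	struct page *page;

	page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
	if (!page)
		return -ENOMEM;
	/* ... initialize or examine the locked page ... */
	unlock_page(page);
	put_page(page);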

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
						  pgoff_t index)
{
	return pagecache_get_page(mapping, index,
				  FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
				  mapping_gfp_mask(mapping));
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Mel Gorman | 38 | 100.00% | 1 | 100.00%
Total | 38 | 100.00% | 1 | 100.00%

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			    int tag, unsigned int nr_pages,
			    struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			      int tag, unsigned int nr_entries,
			      struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
					 pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
					   pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Christoph Hellwig | 14 | 46.67% | 1 | 12.50%
Andrew Morton | 9 | 30.00% | 2 | 25.00%
Linus Torvalds (pre-git) | 5 | 16.67% | 3 | 37.50%
Fengguang Wu | 1 | 3.33% | 1 | 12.50%
Linus Torvalds | 1 | 3.33% | 1 | 12.50%
Total | 30 | 100.00% | 8 | 100.00%

extern struct page *read_cache_page(struct address_space *mapping,
				    pgoff_t index, filler_t *filler,
				    void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
			    struct list_head *pages, filler_t *filler,
			    void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Pekka J Enberg | 46 | 97.87% | 1 | 50.00%
Fengguang Wu | 1 | 2.13% | 1 | 50.00%
Total | 47 | 100.00% | 2 | 100.00%

/*
 * Get index of the page within the radix tree.
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Kirill A. Shutemov | 35 | 66.04% | 2 | 66.67%
Naoya Horiguchi | 18 | 33.96% | 1 | 33.33%
Total | 53 | 100.00% | 3 | 100.00%

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Kirill A. Shutemov | 38 | 100.00% | 1 | 100.00%
Total | 38 | 100.00% | 1 | 100.00%

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Christoph Hellwig | 23 | 95.83% | 1 | 50.00%
Kirill A. Shutemov | 1 | 4.17% | 1 | 50.00%
Total | 24 | 100.00% | 2 | 100.00%


static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Mel Gorman | 23 | 92.00% | 1 | 33.33%
Kirill A. Shutemov | 1 | 4.00% | 1 | 33.33%
Huang Ying | 1 | 4.00% | 1 | 33.33%
Total | 25 | 100.00% | 3 | 100.00%

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, unsigned long address);
static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;

	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);

	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Andrew Morton | 29 | 50.00% | 2 | 50.00%
Naoya Horiguchi | 22 | 37.93% | 1 | 25.00%
Linus Torvalds | 7 | 12.07% | 1 | 25.00%
Total | 58 | 100.00% | 4 | 100.00%
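
As a worked example (values illustrative): with 4 KiB pages (PAGE_SHIFT == 12), a non-hugetlb VMA with vm_start == 0x40000000 and vm_pgoff == 0x100 maps the address 0x40003000 to pgoff = ((0x40003000 - 0x40000000) >> 12) + 0x100 = 0x3 + 0x100 = 0x103.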

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Nicholas Piggin | 32 | 88.89% | 2 | 66.67%
Kirill A. Shutemov | 4 | 11.11% | 1 | 33.33%
Total | 36 | 100.00% | 3 | 100.00%

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Andrew Morton | 23 | 82.14% | 1 | 33.33%
Ingo Molnar | 3 | 10.71% | 1 | 33.33%
Nicholas Piggin | 2 | 7.14% | 1 | 33.33%
Total | 28 | 100.00% | 3 | 100.00%

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Matthew Wilcox | 30 | 93.75% | 1 | 50.00%
Nicholas Piggin | 2 | 6.25% | 1 | 50.00%
Total | 32 | 100.00% | 2 | 100.00%
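
A small hypothetical caller sketch, showing that a non-zero return means a fatal signal arrived before the lock was taken (the surrounding reference handling is illustrative):

	int err;

	err = lock_page_killable(page);
	if (err) {
		put_page(page);
		return err;	/* -EINTR: killed while waiting for PG_locked */
	}
	/* ... page is locked here ... */
	unlock_page(page);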

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Michel Lespinasse | 39 | 100.00% | 1 | 100.00%
Total | 39 | 100.00% | 1 | 100.00%

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Neil Brown | 18 | 62.07% | 1 | 50.00%
Nicholas Piggin | 11 | 37.93% | 1 | 50.00%
Total | 29 | 100.00% | 2 | 100.00%


static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Linus Torvalds (pre-git) | 19 | 55.88% | 2 | 33.33%
Nicholas Piggin | 9 | 26.47% | 1 | 16.67%
Kirill A. Shutemov | 3 | 8.82% | 1 | 16.67%
Christoph Hellwig | 2 | 5.88% | 1 | 16.67%
Linus Torvalds | 1 | 2.94% | 1 | 16.67%
Total | 34 | 100.00% | 6 | 100.00%

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Christoph Hellwig | 17 | 65.38% | 1 | 33.33%
Andrew Morton | 8 | 30.77% | 1 | 33.33%
Linus Torvalds (pre-git) | 1 | 3.85% | 1 | 33.33%
Total | 26 | 100.00% | 3 | 100.00%

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault everything in given userspace address range in.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
	    ((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Daniel Vetter | 75 | 63.56% | 1 | 25.00%
Al Viro | 29 | 24.58% | 1 | 25.00%
Andrew Morton | 13 | 11.02% | 1 | 25.00%
Linus Torvalds | 1 | 0.85% | 1 | 25.00%
Total | 118 | 100.00% | 4 | 100.00%


static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
	    ((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Daniel Vetter | 96 | 73.85% | 1 | 25.00%
Al Viro | 29 | 22.31% | 2 | 50.00%
Dave Chinner | 5 | 3.85% | 1 | 25.00%
Total | 130 | 100.00% | 4 | 100.00%
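
These helpers are typically used to pre-fault a user buffer before taking page locks, so that a subsequent copy with page faults disabled is likely to succeed without deadlocking on the locked page. A hedged skeleton of that ordering (buf and bytes are hypothetical; the copy step is elided):

	/* Pre-fault the source buffer while holding no page locks. */
	if (fault_in_pages_readable(buf, bytes))
		return -EFAULT;

	/* ... lock the pagecache page ... */
	pagefault_disable();
	/* ... copy from buf with an atomic user-copy primitive ... */
	pagefault_enable();
	/* ... unlock the page; if the copy fell short, retry the pre-fault ... */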

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
				    struct address_space *mapping,
				    pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Nicholas Piggin | 57 | 96.61% | 1 | 50.00%
Kirill A. Shutemov | 2 | 3.39% | 1 | 50.00%
Total | 59 | 100.00% | 2 | 100.00%
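
A hedged sketch of the allocate-and-insert pattern, roughly what readahead does via add_to_page_cache_lru() (mapping and index are assumed context):

	struct page *page;
	int err;

	page = __page_cache_alloc(mapping_gfp_constraint(mapping, GFP_KERNEL));
	if (!page)
		return -ENOMEM;

	err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
	if (err) {
		put_page(page);	/* e.g. -EEXIST if someone raced us in */
		return err;
	}
	/* The page is now locked, referenced, and visible in the pagecache. */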


static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Fabian Frederick | 28 | 93.33% | 1 | 50.00%
Kirill A. Shutemov | 2 | 6.67% | 1 | 50.00%
Total | 30 | 100.00% | 2 | 100.00%

#endif /* _LINUX_PAGEMAP_H */

Overall Contributors

Person | Tokens | Prop | Commits | CommitProp
Nicholas Piggin | 380 | 16.96% | 10 | 9.17%
Andrew Morton | 236 | 10.53% | 13 | 11.93%
Mel Gorman | 196 | 8.75% | 3 | 2.75%
Daniel Vetter | 171 | 7.63% | 1 | 0.92%
Johannes Weiner | 138 | 6.16% | 3 | 2.75%
Kirill A. Shutemov | 97 | 4.33% | 5 | 4.59%
Christoph Hellwig | 95 | 4.24% | 3 | 2.75%
Lee Schermerhorn | 94 | 4.19% | 3 | 2.75%
Linus Torvalds (pre-git) | 74 | 3.30% | 13 | 11.93%
Linus Torvalds | 65 | 2.90% | 9 | 8.26%
Al Viro | 62 | 2.77% | 5 | 4.59%
Michel Lespinasse | 58 | 2.59% | 1 | 0.92%
Naoya Horiguchi | 57 | 2.54% | 3 | 2.75%
Guillaume Chazarain | 52 | 2.32% | 1 | 0.92%
Huang Ying | 52 | 2.32% | 2 | 1.83%
Matthew Wilcox | 49 | 2.19% | 2 | 1.83%
Pekka J Enberg | 46 | 2.05% | 1 | 0.92%
Michal Hocko | 29 | 1.29% | 3 | 2.75%
Ross Zwisler | 29 | 1.29% | 1 | 0.92%
Fabian Frederick | 28 | 1.25% | 1 | 0.92%
Fengguang Wu | 28 | 1.25% | 2 | 1.83%
Harvey Harrison | 25 | 1.12% | 1 | 0.92%
Jens Axboe | 24 | 1.07% | 2 | 1.83%
Trond Myklebust | 21 | 0.94% | 1 | 0.92%
Neil Brown | 18 | 0.80% | 1 | 0.92%
Miklos Szeredi | 17 | 0.76% | 1 | 0.92%
David Howells | 15 | 0.67% | 1 | 0.92%
Paul Jackson | 15 | 0.67% | 1 | 0.92%
Motohiro Kosaki | 13 | 0.58% | 1 | 0.92%
Sasha Levin | 12 | 0.54% | 1 | 0.92%
MinChan Kim | 11 | 0.49% | 2 | 1.83%
Darrick J. Wong | 9 | 0.40% | 1 | 0.92%
JoonSoo Kim | 6 | 0.27% | 2 | 1.83%
Dave Chinner | 5 | 0.22% | 1 | 0.92%
Paul E. McKenney | 4 | 0.18% | 2 | 1.83%
Ingo Molnar | 3 | 0.13% | 1 | 0.92%
Frédéric Weisbecker | 2 | 0.09% | 1 | 0.92%
Andrea Arcangeli | 2 | 0.09% | 1 | 0.92%
Paul Cassella | 1 | 0.04% | 1 | 0.92%
Konstantin Khlebnikov | 1 | 0.04% | 1 | 0.92%
Randy Dunlap | 1 | 0.04% | 1 | 0.92%
Total | 2241 | 100.00% | 109 | 100.00%