Release 4.7 include/linux/pagemap.h
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H
/*
* Copyright 1995 Linus Torvalds
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>
/*
* Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
* allocation mode flags.
*/
enum mapping_flags {
AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */
AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
AS_EXITING = __GFP_BITS_SHIFT + 4, /* final truncate in progress */
};
static inline void mapping_set_error(struct address_space *mapping, int error)
{
if (unlikely(error)) {
if (error == -ENOSPC)
set_bit(AS_ENOSPC, &mapping->flags);
else
set_bit(AS_EIO, &mapping->flags);
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| guillaume chazarain | 49 | 94.23% | 1 | 50.00% |
| andrew morton | 3 | 5.77% | 1 | 50.00% |
| Total | 52 | 100.00% | 2 | 100.00% |
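A hedged usage sketch (not part of the header): a writeback completion path records a failure with mapping_set_error(), and a later fsync-style check converts the sticky AS_EIO/AS_ENOSPC bits back into an errno, in the spirit of filemap_check_errors() in mm/filemap.c. The example_ helpers are hypothetical.

/* Hypothetical writeback completion hook: record a failure on the mapping. */
static void example_end_writeback(struct address_space *mapping, int err)
{
	mapping_set_error(mapping, err);	/* no-op when err == 0 */
}

/* Hypothetical fsync-style check: turn the sticky bits back into an errno. */
static int example_check_errors(struct address_space *mapping)
{
	int ret = 0;

	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}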
static inline void mapping_set_unevictable(struct address_space *mapping)
{
set_bit(AS_UNEVICTABLE, &mapping->flags);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| lee schermerhorn | 22 | 100.00% | 1 | 100.00% |
| Total | 22 | 100.00% | 1 | 100.00% |
static inline void mapping_clear_unevictable(struct address_space *mapping)
{
clear_bit(AS_UNEVICTABLE, &mapping->flags);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| lee schermerhorn | 22 | 100.00% | 1 | 100.00% |
| Total | 22 | 100.00% | 1 | 100.00% |
static inline int mapping_unevictable(struct address_space *mapping)
{
if (mapping)
return test_bit(AS_UNEVICTABLE, &mapping->flags);
return !!mapping;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| lee schermerhorn | 32 | 100.00% | 2 | 100.00% |
| Total | 32 | 100.00% | 2 | 100.00% |
static inline void mapping_set_exiting(struct address_space *mapping)
{
set_bit(AS_EXITING, &mapping->flags);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| johannes weiner | 22 | 100.00% | 1 | 100.00% |
| Total | 22 | 100.00% | 1 | 100.00% |
static inline int mapping_exiting(struct address_space *mapping)
{
return test_bit(AS_EXITING, &mapping->flags);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| johannes weiner | 23 | 100.00% | 1 | 100.00% |
| Total | 23 | 100.00% | 1 | 100.00% |
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| andrew morton | 18 | 78.26% | 1 | 33.33% |
| al viro | 5 | 21.74% | 2 | 66.67% |
| Total | 23 | 100.00% | 3 | 100.00% |
/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
gfp_t gfp_mask)
{
return mapping_gfp_mask(mapping) & gfp_mask;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| michal hocko | 23 | 100.00% | 1 | 100.00% |
| Total | 23 | 100.00% | 1 | 100.00% |
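A minimal sketch of a typical caller: combine the mapping's own allocation mask with an extra restriction, for example forbidding filesystem recursion in a path that already holds fs locks. example_alloc_page_nofs() is a hypothetical helper; __page_cache_alloc() is declared later in this header.

static struct page *example_alloc_page_nofs(struct address_space *mapping)
{
	/* Honour the mapping's gfp mask, but never recurse into the fs. */
	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL & ~__GFP_FS);

	return __page_cache_alloc(gfp);
}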
/*
* This is non-atomic. Only to be used before the mapping is activated.
* Probably needs a barrier...
*/
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
(__force unsigned long)mask;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| andrew morton | 29 | 72.50% | 1 | 50.00% |
| al viro | 11 | 27.50% | 1 | 50.00% |
| Total | 40 | 100.00% | 2 | 100.00% |
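A hedged sketch of the usual call site: a filesystem restricts its inode's pagecache allocations at inode-setup time, while the mapping is not yet visible to other threads (which is why the non-atomic update above is tolerable). example_init_mapping() is hypothetical.

static void example_init_mapping(struct inode *inode)
{
	/* All future pagecache allocations for this inode avoid fs recursion. */
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
}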
void release_pages(struct page **pages, int nr, bool cold);
/*
* speculatively take a reference to a page.
* If the page is free (_refcount == 0), then _refcount is untouched, and 0
* is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
*
* This function must be called inside the same rcu_read_lock() section as has
* been used to lookup the page in the pagecache radix-tree (or page table):
* this allows allocators to use a synchronize_rcu() to stabilize _refcount.
*
* Unless an RCU grace period has passed, the count of all pages coming out
* of the allocator must be considered unstable. page_count may return higher
* than expected, and put_page must be able to do the right thing when the
* page has been finished with, no matter what it is subsequently allocated
* for (because put_page is what is used here to drop an invalid speculative
* reference).
*
* This is the interesting part of the lockless pagecache (and lockless
* get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
* has the following pattern:
* 1. find page in radix tree
* 2. conditionally increment refcount
* 3. check the page is still in pagecache (if no, goto 1)
*
* Remove-side that cares about stability of _refcount (eg. reclaim) has the
* following (with tree_lock held for write):
* A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
* B. remove page from pagecache
* C. free the page
*
* There are 2 critical interleavings that matter:
* - 2 runs before A: in this case, A sees elevated refcount and bails out
* - A runs before 2: in this case, 2 sees zero refcount and retries;
* subsequently, B will complete and 1 will find no page, causing the
* lookup to return NULL.
*
* It is possible that between 1 and 2, the page is removed then the exact same
* page is inserted into the same position in pagecache. That's OK: the
* old find_get_page using tree_lock could equally have run before or after
* such a re-insertion, depending on order that locks are granted.
*
* Lookups racing against pagecache insertion isn't a big problem: either 1
* will find the page or it will not. Likewise, the old find_get_page could run
* either before the insertion or afterwards, depending on timing.
*/
static inline int page_cache_get_speculative(struct page *page)
{
VM_BUG_ON(in_interrupt());
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
VM_BUG_ON(!in_atomic());
# endif
/*
* Preempt must be disabled here - we rely on rcu_read_lock doing
* this for us.
*
* Pagecache won't be truncated from interrupt context, so if we have
* found a page in the radix tree here, we have pinned its refcount by
* disabling preempt, and hence no need for the "speculative get" that
* SMP requires.
*/
VM_BUG_ON_PAGE(page_count(page) == 0, page);
page_ref_inc(page);
#else
if (unlikely(!get_page_unless_zero(page))) {
/*
* Either the page has been freed, or will be freed.
* In either case, retry here and the caller should
* do the right thing (see comments above).
*/
return 0;
}
#endif
VM_BUG_ON_PAGE(PageTail(page), page);
return 1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| nick piggin | 74 | 87.06% | 1 | 20.00% |
| sasha levin | 6 | 7.06% | 1 | 20.00% |
| paul e. mckenney | 3 | 3.53% | 1 | 20.00% |
| joonsoo kim | 1 | 1.18% | 1 | 20.00% |
| frederic weisbecker | 1 | 1.18% | 1 | 20.00% |
| Total | 85 | 100.00% | 5 | 100.00% |
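A simplified, hedged sketch of steps 1-3 from the comment above, modelled on find_get_entry() in mm/filemap.c. Shadow/exceptional radix-tree entries and other details are omitted, and example_lockless_lookup() is not a real kernel function.

static struct page *example_lockless_lookup(struct address_space *mapping,
					    pgoff_t offset)
{
	struct page *page;
	void **slot;

	rcu_read_lock();
repeat:
	page = NULL;
	slot = radix_tree_lookup_slot(&mapping->page_tree, offset);	/* 1. find */
	if (slot) {
		page = radix_tree_deref_slot(slot);
		if (radix_tree_deref_retry(page))
			goto repeat;
		if (page && !page_cache_get_speculative(page))		/* 2. pin */
			goto repeat;
		/* 3. recheck: the page may have been removed/reused meanwhile */
		if (page && unlikely(page != *slot)) {
			put_page(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}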
/*
* Same as above, but add instead of inc (could just be merged)
*/
static inline int page_cache_add_speculative(struct page *page, int count)
{
VM_BUG_ON(in_interrupt());
#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
VM_BUG_ON(!in_atomic());
# endif
VM_BUG_ON_PAGE(page_count(page) == 0, page);
page_ref_add(page, count);
#else
if (unlikely(!page_ref_add_unless(page, count, 0)))
return 0;
#endif
VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
return 1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| nick piggin | 94 | 88.68% | 1 | 20.00% |
| sasha levin | 6 | 5.66% | 1 | 20.00% |
| joonsoo kim | 4 | 3.77% | 1 | 20.00% |
| frederic weisbecker | 1 | 0.94% | 1 | 20.00% |
| paul e. mckenney | 1 | 0.94% | 1 | 20.00% |
| Total | 106 | 100.00% | 5 | 100.00% |
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
return alloc_pages(gfp, 0);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| nick piggin | 15 | 75.00% | 1 | 50.00% |
| paul jackson | 5 | 25.00% | 1 | 50.00% |
| Total | 20 | 100.00% | 2 | 100.00% |
#endif
static inline struct page *page_cache_alloc(struct address_space *x)
{
return __page_cache_alloc(mapping_gfp_mask(x));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| linus torvalds | 19 | 82.61% | 1 | 33.33% |
| andrew morton | 3 | 13.04% | 1 | 33.33% |
| nick piggin | 1 | 4.35% | 1 | 33.33% |
| Total | 23 | 100.00% | 3 | 100.00% |
static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| andrew morton | 24 | 96.00% | 2 | 66.67% |
| nick piggin | 1 | 4.00% | 1 | 33.33% |
| Total | 25 | 100.00% | 3 | 100.00% |
static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
return __page_cache_alloc(mapping_gfp_mask(x) |
__GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| fengguang wu | 29 | 100.00% | 1 | 100.00% |
| Total | 29 | 100.00% | 1 | 100.00% |
typedef int filler_t(void *, struct page *);
pgoff_t page_cache_next_hole(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
#define FGP_ACCESSED 0x00000001
#define FGP_LOCK 0x00000002
#define FGP_CREAT 0x00000004
#define FGP_WRITE 0x00000008
#define FGP_NOFS 0x00000010
#define FGP_NOWAIT 0x00000020
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
int fgp_flags, gfp_t cache_gfp_mask);
/**
* find_get_page - find and get a page reference
* @mapping: the address_space to search
* @offset: the page index
*
* Looks up the page cache slot at @mapping & @offset. If there is a
* page cache page, it is returned with an increased refcount.
*
* Otherwise, %NULL is returned.
*/
static inline struct page *find_get_page(struct address_space *mapping,
pgoff_t offset)
{
return pagecache_get_page(mapping, offset, 0, 0);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 16 | 55.17% | 1 | 16.67% |
| pre-git | 9 | 31.03% | 2 | 33.33% |
| johannes weiner | 2 | 6.90% | 1 | 16.67% |
| andrew morton | 1 | 3.45% | 1 | 16.67% |
| wu fengguang | 1 | 3.45% | 1 | 16.67% |
| Total | 29 | 100.00% | 6 | 100.00% |
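A hedged usage sketch: peek at the cache without locking the page, and drop the reference when done. example_page_is_cached_uptodate() is hypothetical.

static bool example_page_is_cached_uptodate(struct address_space *mapping,
					    pgoff_t offset)
{
	struct page *page = find_get_page(mapping, offset);
	bool ret = false;

	if (page) {
		ret = PageUptodate(page);
		put_page(page);		/* drop the reference find_get_page() took */
	}
	return ret;
}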
static inline struct page *find_get_page_flags(struct address_space *mapping,
pgoff_t offset, int fgp_flags)
{
return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 20 | 62.50% | 1 | 33.33% |
| johannes weiner | 11 | 34.38% | 1 | 33.33% |
| pre-git | 1 | 3.12% | 1 | 33.33% |
| Total | 32 | 100.00% | 3 | 100.00% |
/**
* find_lock_page - locate, pin and lock a pagecache page
* @mapping: the address_space to search
* @offset: the page index
*
* Looks up the page cache slot at @mapping & @offset. If there is a
* page cache page, it is returned locked and with an increased
* refcount.
*
* Otherwise, %NULL is returned.
*
* find_lock_page() may sleep.
*/
static inline struct page *find_lock_page(struct address_space *mapping,
pgoff_t offset)
{
return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 16 | 55.17% | 1 | 16.67% |
| pre-git | 10 | 34.48% | 2 | 33.33% |
| johannes weiner | 1 | 3.45% | 1 | 16.67% |
| andrew morton | 1 | 3.45% | 1 | 16.67% |
| wu fengguang | 1 | 3.45% | 1 | 16.67% |
| Total | 29 | 100.00% | 6 | 100.00% |
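A hedged usage sketch: look up and lock a cached page, operate on it, then unlock and release it. While the lock is held the page cannot be removed from the cache. example_dirty_cached_page() is hypothetical.

static void example_dirty_cached_page(struct address_space *mapping,
				      pgoff_t offset)
{
	struct page *page = find_lock_page(mapping, offset);

	if (!page)
		return;
	set_page_dirty(page);	/* safe: page is locked and pinned */
	unlock_page(page);
	put_page(page);
}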
/**
* find_or_create_page - locate or add a pagecache page
* @mapping: the page's address_space
* @index: the page's index into the mapping
* @gfp_mask: page allocation mode
*
* Looks up the page cache slot at @mapping & @index. If there is a
* page cache page, it is returned locked and with an increased
* refcount.
*
* If the page is not present, a new page is allocated using @gfp_mask
* and added to the page cache and the VM's LRU list. The page is
* returned locked and with an increased refcount.
*
* On memory exhaustion, %NULL is returned.
*
* find_or_create_page() may sleep, even if @gfp_mask specifies an
* atomic allocation!
*/
static inline struct page *find_or_create_page(struct address_space *mapping,
pgoff_t offset, gfp_t gfp_mask)
{
return pagecache_get_page(mapping, offset,
FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
gfp_mask);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 21 | 58.33% | 1 | 25.00% |
| linus torvalds | 13 | 36.11% | 1 | 25.00% |
| al viro | 1 | 2.78% | 1 | 25.00% |
| wu fengguang | 1 | 2.78% | 1 | 25.00% |
| Total | 36 | 100.00% | 4 | 100.00% |
/**
* grab_cache_page_nowait - returns locked page at given index in given cache
* @mapping: target address_space
* @index: the page index
*
* Same as grab_cache_page(), but do not wait if the page is unavailable.
* This is intended for speculative data generators, where the data can
* be regenerated if the page couldn't be grabbed. This routine should
* be safe to call while holding the lock for another page.
*
* Clear __GFP_FS when allocating the page to avoid recursion into the fs
* and deadlock against the caller's locked page.
*/
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
pgoff_t index)
{
return pagecache_get_page(mapping, index,
FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
mapping_gfp_mask(mapping));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 38 | 100.00% | 1 | 100.00% |
| Total | 38 | 100.00% | 1 | 100.00% |
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
unsigned int nr_entries, struct page **entries,
pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
int tag, unsigned int nr_entries,
struct page **entries, pgoff_t *indices);
struct page *grab_cache_page_write_begin(struct address_space *mapping,
pgoff_t index, unsigned flags);
/*
* Returns locked page at given index in given cache, creating it if needed.
*/
static inline struct page *grab_cache_page(struct address_space *mapping,
pgoff_t index)
{
return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph hellwig | 14 | 46.67% | 1 | 12.50% |
| andrew morton | 9 | 30.00% | 2 | 25.00% |
| pre-git | 5 | 16.67% | 3 | 37.50% |
| linus torvalds | 1 | 3.33% | 1 | 12.50% |
| wu fengguang | 1 | 3.33% | 1 | 12.50% |
| Total | 30 | 100.00% | 8 | 100.00% |
extern struct page * read_cache_page(struct address_space *mapping,
pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
struct list_head *pages, filler_t *filler, void *data);
static inline struct page *read_mapping_page(struct address_space *mapping,
pgoff_t index, void *data)
{
filler_t *filler = (filler_t *)mapping->a_ops->readpage;
return read_cache_page(mapping, index, filler, data);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pekka j enberg | 46 | 97.87% | 1 | 50.00% |
| wu fengguang | 1 | 2.13% | 1 | 50.00% |
| Total | 47 | 100.00% | 2 | 100.00% |
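A hedged usage sketch: read_mapping_page() returns an uptodate page or an ERR_PTR(), never NULL, so callers check with IS_ERR(). example_read_byte() is hypothetical and assumes the usual kernel context (<linux/err.h>, <linux/highmem.h>).

static int example_read_byte(struct address_space *mapping, loff_t pos, u8 *out)
{
	struct page *page = read_mapping_page(mapping, pos >> PAGE_SHIFT, NULL);
	void *kaddr;

	if (IS_ERR(page))
		return PTR_ERR(page);

	kaddr = kmap_atomic(page);
	*out = *((u8 *)kaddr + offset_in_page(pos));
	kunmap_atomic(kaddr);

	put_page(page);		/* read_mapping_page() returned it with a reference */
	return 0;
}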
/*
* Get the page's offset into its mapping, in units of PAGE_SIZE.
* (TODO: hugepage should have ->index in PAGE_SIZE)
*/
static inline pgoff_t page_to_pgoff(struct page *page)
{
pgoff_t pgoff;
if (unlikely(PageHeadHuge(page)))
return page->index << compound_order(page);
if (likely(!PageTransTail(page)))
return page->index;
/*
* We don't initialize ->index for tail pages: calculate based on
* head page
*/
pgoff = compound_head(page)->index;
pgoff += page - compound_head(page);
return pgoff;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| naoya horiguchi | 37 | 50.68% | 1 | 50.00% |
| kirill a. shutemov | 36 | 49.32% | 1 | 50.00% |
| Total | 73 | 100.00% | 2 | 100.00% |
/*
* Return byte-offset into filesystem object for page.
*/
static inline loff_t page_offset(struct page *page)
{
return ((loff_t)page->index) << PAGE_SHIFT;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph hellwig | 23 | 95.83% | 1 | 50.00% |
| kirill a. shutemov | 1 | 4.17% | 1 | 50.00% |
| Total | 24 | 100.00% | 2 | 100.00% |
static inline loff_t page_file_offset(struct page *page)
{
return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mel gorman | 24 | 96.00% | 1 | 50.00% |
| kirill a. shutemov | 1 | 4.00% | 1 | 50.00% |
| Total | 25 | 100.00% | 2 | 100.00% |
extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
unsigned long address);
static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
unsigned long address)
{
pgoff_t pgoff;
if (unlikely(is_vm_hugetlb_page(vma)))
return linear_hugepage_index(vma, address);
pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
pgoff += vma->vm_pgoff;
return pgoff;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| andrew morton | 29 | 50.00% | 2 | 50.00% |
| naoya horiguchi | 22 | 37.93% | 1 | 25.00% |
| linus torvalds | 7 | 12.07% | 1 | 25.00% |
| Total | 58 | 100.00% | 4 | 100.00% |
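Worked example (illustrative numbers, assuming 4 KiB pages, i.e. PAGE_SHIFT = 12): for a VMA with vm_start = 0x7f0000000000 and vm_pgoff = 16, an address of vm_start + 0x3000 yields (0x3000 >> 12) + 16 = 3 + 16 = 19, i.e. page index 19 of the backing file.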
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags);
extern void unlock_page(struct page *page);
static inline int trylock_page(struct page *page)
{
page = compound_head(page);
return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| nick piggin | 32 | 88.89% | 2 | 66.67% |
| kirill a. shutemov | 4 | 11.11% | 1 | 33.33% |
| Total | 36 | 100.00% | 3 | 100.00% |
/*
* lock_page may only be called if we have the page's inode pinned.
*/
static inline void lock_page(struct page *page)
{
might_sleep();
if (!trylock_page(page))
__lock_page(page);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| andrew morton | 23 | 82.14% | 1 | 33.33% |
| ingo molnar | 3 | 10.71% | 1 | 33.33% |
| nick piggin | 2 | 7.14% | 1 | 33.33% |
| Total | 28 | 100.00% | 3 | 100.00% |
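A hedged sketch of the classic pin-lock-recheck pattern: a page found without FGP_LOCK may be truncated before lock_page() returns, so ->mapping must be re-checked once the lock is held. example_get_locked_page() is hypothetical.

static struct page *example_get_locked_page(struct address_space *mapping,
					    pgoff_t offset)
{
	struct page *page = find_get_page(mapping, offset);

	if (!page)
		return NULL;
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {	/* truncated meanwhile */
		unlock_page(page);
		put_page(page);
		return NULL;
	}
	return page;	/* locked and pinned; caller unlocks and puts */
}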
/*
* lock_page_killable is like lock_page but can be interrupted by fatal
* signals. It returns 0 if it locked the page and -EINTR if it was
* killed while waiting.
*/
static inline int lock_page_killable(struct page *page)
{
might_sleep();
if (!trylock_page(page))
return __lock_page_killable(page);
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| matthew wilcox | 30 | 93.75% | 1 | 50.00% |
| nick piggin | 2 | 6.25% | 1 | 50.00% |
| Total | 32 | 100.00% | 2 | 100.00% |
/*
* lock_page_or_retry - Lock the page, unless this would block and the
* caller indicated that it can handle a retry.
*
* Return value and mmap_sem implications depend on flags; see
* __lock_page_or_retry().
*/
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags)
{
might_sleep();
return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| michel lespinasse | 39 | 100.00% | 1 | 100.00% |
| Total | 39 | 100.00% | 1 | 100.00% |
/*
* This is exported only for wait_on_page_locked/wait_on_page_writeback,
* and for filesystems which need to wait on PG_private.
*/
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable_timeout(struct page *page,
int bit_nr, unsigned long timeout);
static inline int wait_on_page_locked_killable(struct page *page)
{
if (!PageLocked(page))
return 0;
return wait_on_page_bit_killable(compound_head(page), PG_locked);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| kosaki motohiro | 27 | 79.41% | 1 | 50.00% |
| kirill a. shutemov | 7 | 20.59% | 1 | 50.00% |
| Total | 34 | 100.00% | 2 | 100.00% |
extern wait_queue_head_t *page_waitqueue(struct page *page);
static inline void wake_up_page(struct page *page, int bit)
{
__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| neil brown | 30 | 100.00% | 1 | 100.00% |
| Total | 30 | 100.00% | 1 | 100.00% |
/*
* Wait for a page to be unlocked.
*
* This must be called with the caller "holding" the page,
* i.e. with an elevated page refcount so that the page won't
* go away during the wait.
*/
static inline void wait_on_page_locked(struct page *page)
{
if (PageLocked(page))
wait_on_page_bit(compound_head(page), PG_locked);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 21 | 72.41% | 2 | 33.33% |
| christoph hellwig | 3 | 10.34% | 1 | 16.67% |
| kirill a. shutemov | 3 | 10.34% | 1 | 16.67% |
| andrew morton | 1 | 3.45% | 1 | 16.67% |
| linus torvalds | 1 | 3.45% | 1 | 16.67% |
| Total | 29 | 100.00% | 6 | 100.00% |
/*
* Wait for a page to complete writeback
*/
static inline void wait_on_page_writeback(struct page *page)
{
if (PageWriteback(page))
wait_on_page_bit(page, PG_writeback);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph hellwig | 17 | 65.38% | 1 | 33.33% |
| andrew morton | 8 | 30.77% | 1 | 33.33% |
| pre-git | 1 | 3.85% | 1 | 33.33% |
| Total | 26 | 100.00% | 3 | 100.00% |
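A hedged sketch: a path that must not touch a page while an earlier writeback of it is still in flight locks the page first (new writeback starts only under the page lock) and then waits for PG_writeback to clear. example_wait_until_stable() is hypothetical.

static void example_wait_until_stable(struct page *page)
{
	lock_page(page);		/* blocks new writeback from starting */
	wait_on_page_writeback(page);	/* waits for any writeback in flight */
	/* ... the page can now be modified or migrated safely ... */
	unlock_page(page);
}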
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);
void page_endio(struct page *page, int rw, int err);
/*
* Add an arbitrary waiter to a page's wait queue
*/
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
/*
* Fault one or two userspace pages into pagetables.
* Return -EINVAL if more than two pages would be needed.
* Return non-zero on a fault.
*/
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
int span, ret;
if (unlikely(size == 0))
return 0;
span = offset_in_page(uaddr) + size;
if (span > 2 * PAGE_SIZE)
return -EINVAL;
/*
* Writing zeroes into userspace here is OK, because we know that if
* the zero gets there, we'll be overwriting it.
*/
ret = __put_user(0, uaddr);
if (ret == 0 && span > PAGE_SIZE)
ret = __put_user(0, uaddr + size - 1);
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| andrew morton | 44 | 49.44% | 1 | 25.00% |
| eric dumazet | 32 | 35.96% | 1 | 25.00% |
| nick piggin | 12 | 13.48% | 1 | 25.00% |
| linus torvalds | 1 | 1.12% | 1 | 25.00% |
| Total | 89 | 100.00% | 4 | 100.00% |
static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
volatile char c;
int ret;
if (unlikely(size == 0))
return 0;
ret = __get_user(c, uaddr);
if (ret == 0) {
const char __user *end = uaddr + size - 1;
if (((unsigned long)uaddr & PAGE_MASK) !=
((unsigned long)end & PAGE_MASK)) {
ret = __get_user(c, end);
(void)c;
}
}
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| andrew morton | 78 | 74.29% | 1 | 25.00% |
| nick piggin | 18 | 17.14% | 1 | 25.00% |
| andi kleen | 7 | 6.67% | 1 | 25.00% |
| linus torvalds | 2 | 1.90% | 1 | 25.00% |
| Total | 105 | 100.00% | 4 | 100.00% |
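A hedged sketch of the prefault pattern used by write(2) paths: touch the source buffer before taking the page lock so that the copy done under the lock is unlikely to fault (a fault there could deadlock if it needs the very page being written). grab_cache_page_write_begin() is the real helper declared above; example_write_begin() and its error handling are illustrative.

static int example_write_begin(struct address_space *mapping, pgoff_t index,
			       const char __user *buf, int bytes,
			       struct page **pagep)
{
	struct page *page;

	if (fault_in_pages_readable(buf, bytes))
		return -EFAULT;

	page = grab_cache_page_write_begin(mapping, index, 0);
	if (!page)
		return -ENOMEM;

	*pagep = page;		/* locked; caller copies, then unlocks and puts */
	return 0;
}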
/*
* Multipage variants of the above prefault helpers, useful if more than
* PAGE_SIZE of data needs to be prefaulted. These are separate from the above
* functions (which only handle up to PAGE_SIZE) to avoid clobbering the
* filemap.c hotpaths.
*/
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
int ret = 0;
char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
return ret;
/*
* Writing zeroes into userspace here is OK, because we know that if
* the zero gets there, we'll be overwriting it.
*/
while (uaddr <= end) {
ret = __put_user(0, uaddr);
if (ret != 0)
return ret;
uaddr += PAGE_SIZE;
}
/* Check whether the range spilled into the next page. */
if (((unsigned long)uaddr & PAGE_MASK) ==
((unsigned long)end & PAGE_MASK))
ret = __put_user(0, end);
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| daniel vetter | 106 | 97.25% | 1 | 50.00% |
| paul gortmaker | 3 | 2.75% | 1 | 50.00% |
| Total | 109 | 100.00% | 2 | 100.00% |
static inline int fault_in_multipages_readable(const char __user *uaddr,
int size)
{
volatile char c;
int ret = 0;
const char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
return ret;
while (uaddr <= end) {
ret = __get_user(c, uaddr);
if (ret != 0)
return ret;
uaddr += PAGE_SIZE;
}
/* Check whether the range spilled into the next page. */
if (((unsigned long)uaddr & PAGE_MASK) ==
((unsigned long)end & PAGE_MASK)) {
ret = __get_user(c, end);
(void)c;
}
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| daniel vetter | 118 | 97.52% | 1 | 50.00% |
| paul gortmaker | 3 | 2.48% | 1 | 50.00% |
| Total | 121 | 100.00% | 2 | 100.00% |
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
/*
* Like add_to_page_cache_locked, but used to add newly allocated pages:
* the page is new, so we can just run __SetPageLocked() against it.
*/
static inline int add_to_page_cache(struct page *page,
struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
int error;
__SetPageLocked(page);
error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
if (unlikely(error))
__ClearPageLocked(page);
return error;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| nick piggin | 57 | 96.61% | 1 | 50.00% |
| kirill a. shutemov | 2 | 3.39% | 1 | 50.00% |
| Total | 59 | 100.00% | 2 | 100.00% |
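A hedged sketch of the allocate-then-insert pattern used by readahead-style code: a freshly allocated page is inserted into the page cache and the LRU, and on failure (for example because another task cached the same index first) it is simply released. example_add_new_page() is hypothetical.

static struct page *example_add_new_page(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
	struct page *page = __page_cache_alloc(gfp);

	if (!page)
		return NULL;

	if (add_to_page_cache_lru(page, mapping, index, gfp)) {
		put_page(page);	/* e.g. -EEXIST: someone else got there first */
		return NULL;
	}
	return page;		/* in the cache and locked, with a reference held */
}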
static inline unsigned long dir_pages(struct inode *inode)
{
return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
PAGE_SHIFT;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| fabian frederick | 28 | 93.33% | 1 | 50.00% |
| kirill a. shutemov | 2 | 6.67% | 1 | 50.00% |
| Total | 30 | 100.00% | 2 | 100.00% |
#endif /* _LINUX_PAGEMAP_H */
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| nick piggin | 388 | 15.91% | 9 | 9.00% |
| andrew morton | 361 | 14.80% | 13 | 13.00% |
| daniel vetter | 225 | 9.23% | 1 | 1.00% |
| mel gorman | 198 | 8.12% | 3 | 3.00% |
| johannes weiner | 140 | 5.74% | 3 | 3.00% |
| lee schermerhorn | 103 | 4.22% | 3 | 3.00% |
| christoph hellwig | 97 | 3.98% | 3 | 3.00% |
| naoya horiguchi | 77 | 3.16% | 3 | 3.00% |
| pre-git | 76 | 3.12% | 13 | 13.00% |
| linus torvalds | 66 | 2.71% | 8 | 8.00% |
| neil brown | 59 | 2.42% | 2 | 2.00% |
| michel lespinasse | 58 | 2.38% | 1 | 1.00% |
| kirill a. shutemov | 57 | 2.34% | 3 | 3.00% |
| guillaume chazarain | 52 | 2.13% | 1 | 1.00% |
| matthew wilcox | 51 | 2.09% | 2 | 2.00% |
| pekka j enberg | 46 | 1.89% | 1 | 1.00% |
| kosaki motohiro | 40 | 1.64% | 1 | 1.00% |
| eric dumazet | 33 | 1.35% | 1 | 1.00% |
| fengguang wu | 29 | 1.19% | 1 | 1.00% |
| ross zwisler | 29 | 1.19% | 1 | 1.00% |
| fabian frederick | 28 | 1.15% | 1 | 1.00% |
| harvey harrison | 25 | 1.03% | 1 | 1.00% |
| michal hocko | 24 | 0.98% | 1 | 1.00% |
| jens axboe | 22 | 0.90% | 1 | 1.00% |
| trond myklebust | 21 | 0.86% | 1 | 1.00% |
| al viro | 17 | 0.70% | 3 | 3.00% |
| miklos szeredi | 17 | 0.70% | 1 | 1.00% |
| paul jackson | 15 | 0.62% | 1 | 1.00% |
| david howells | 15 | 0.62% | 1 | 1.00% |
| sasha levin | 12 | 0.49% | 1 | 1.00% |
| minchan kim | 11 | 0.45% | 2 | 2.00% |
| darrick j. wong | 9 | 0.37% | 1 | 1.00% |
| andi kleen | 7 | 0.29% | 1 | 1.00% |
| paul gortmaker | 6 | 0.25% | 1 | 1.00% |
| wu fengguang | 6 | 0.25% | 1 | 1.00% |
| joonsoo kim | 6 | 0.25% | 2 | 2.00% |
| paul e. mckenney | 4 | 0.16% | 2 | 2.00% |
| ingo molnar | 3 | 0.12% | 1 | 1.00% |
| andrea arcangeli | 2 | 0.08% | 1 | 1.00% |
| frederic weisbecker | 2 | 0.08% | 1 | 1.00% |
| konstantin khlebnikov | 1 | 0.04% | 1 | 1.00% |
| paul cassella | 1 | 0.04% | 1 | 1.00% |
| Total | 2439 | 100.00% | 100 | 100.00% |