Release 4.12 include/linux/pagemap.h
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H
/*
* Copyright 1995 Linus Torvalds
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>
/*
* Bits in mapping->flags.
*/
enum mapping_flags {
AS_EIO = 0, /* IO error on async write */
AS_ENOSPC = 1, /* ENOSPC on async write */
AS_MM_ALL_LOCKS = 2, /* under mm_take_all_locks() */
AS_UNEVICTABLE = 3, /* e.g., ramdisk, SHM_LOCK */
AS_EXITING = 4, /* final truncate in progress */
/* writeback related tags are not used */
AS_NO_WRITEBACK_TAGS = 5,
};
static inline void mapping_set_error(struct address_space *mapping, int error)
{
if (unlikely(error)) {
if (error == -ENOSPC)
set_bit(AS_ENOSPC, &mapping->flags);
else
set_bit(AS_EIO, &mapping->flags);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Guillaume Chazarain | 49 | 94.23% | 1 | 50.00% |
Andrew Morton | 3 | 5.77% | 1 | 50.00% |
Total | 52 | 100.00% | 2 | 100.00% |
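Editorial note: the sketch below (hypothetical helper name, not part of pagemap.h) shows the typical way a writeback completion path records an I/O error; the AS_EIO/AS_ENOSPC bit stored here is what a later sync through the filemap_write_and_wait() family reports back to the caller.
/* Illustrative sketch only; assumes <linux/mm.h> and <linux/page-flags.h>. */
static void example_end_writeback(struct page *page, int err)
{
        struct address_space *mapping = page_mapping(page);

        if (err) {
                SetPageError(page);
                if (mapping)
                        /* latch -EIO/-ENOSPC so a later fsync() can report it */
                        mapping_set_error(mapping, err);
        }
        end_page_writeback(page);
}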
static inline void mapping_set_unevictable(struct address_space *mapping)
{
set_bit(AS_UNEVICTABLE, &mapping->flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Lee Schermerhorn | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
static inline void mapping_clear_unevictable(struct address_space *mapping)
{
clear_bit(AS_UNEVICTABLE, &mapping->flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Lee Schermerhorn | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
static inline int mapping_unevictable(struct address_space *mapping)
{
if (mapping)
return test_bit(AS_UNEVICTABLE, &mapping->flags);
return !!mapping;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Lee Schermerhorn | 32 | 100.00% | 2 | 100.00% |
Total | 32 | 100.00% | 2 | 100.00% |
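Editorial note: a minimal, hypothetical sketch of how a SHM_LOCK-style path would use these helpers (cf. shmem_lock() in this release); vmscan consults mapping_unevictable() when deciding whether the mapping's pages may be reclaimed.
/* Illustrative sketch only. */
static void example_set_mapping_locked(struct address_space *mapping, bool lock)
{
        if (lock)
                mapping_set_unevictable(mapping);
        else
                mapping_clear_unevictable(mapping);
}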
static inline void mapping_set_exiting(struct address_space *mapping)
{
set_bit(AS_EXITING, &mapping->flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Johannes Weiner | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
static inline int mapping_exiting(struct address_space *mapping)
{
return test_bit(AS_EXITING, &mapping->flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Johannes Weiner | 21 | 91.30% | 1 | 50.00% |
Huang Ying | 2 | 8.70% | 1 | 50.00% |
Total | 23 | 100.00% | 2 | 100.00% |
static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Huang Ying | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Huang Ying | 22 | 91.67% | 1 | 50.00% |
Johannes Weiner | 2 | 8.33% | 1 | 50.00% |
Total | 24 | 100.00% | 2 | 100.00% |
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
return mapping->gfp_mask;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 15 | 88.24% | 1 | 33.33% |
Al Viro | 1 | 5.88% | 1 | 33.33% |
Michal Hocko | 1 | 5.88% | 1 | 33.33% |
Total | 17 | 100.00% | 3 | 100.00% |
/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
gfp_t gfp_mask)
{
return mapping_gfp_mask(mapping) & gfp_mask;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michal Hocko | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
/*
* This is non-atomic. Only to be used before the mapping is activated.
* Probably needs a barrier...
*/
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
m->gfp_mask = mask;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 19 | 90.48% | 1 | 33.33% |
Al Viro | 1 | 4.76% | 1 | 33.33% |
Michal Hocko | 1 | 4.76% | 1 | 33.33% |
Total | 21 | 100.00% | 3 | 100.00% |
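Editorial note: a hedged example (hypothetical function names) of how the gfp-mask helpers are commonly combined: a filesystem caps its inode's page-cache allocations at inode-setup time, then narrows the mask further at an individual allocation site.
/* Illustrative sketch only. */
static void example_setup_inode_mapping(struct inode *inode)
{
        /* avoid recursing into the filesystem for this inode's page cache */
        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
}

static struct page *example_alloc_cache_page(struct address_space *mapping)
{
        /* strip __GFP_FS at this call site, bounded by the mapping's own mask */
        gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS);

        return __page_cache_alloc(gfp);
}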
void release_pages(struct page **pages, int nr, bool cold);
/*
* speculatively take a reference to a page.
* If the page is free (_refcount == 0), then _refcount is untouched, and 0
* is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
*
* This function must be called inside the same rcu_read_lock() section as has
* been used to lookup the page in the pagecache radix-tree (or page table):
* this allows allocators to use a synchronize_rcu() to stabilize _refcount.
*
* Unless an RCU grace period has passed, the count of all pages coming out
* of the allocator must be considered unstable. page_count may return higher
* than expected, and put_page must be able to do the right thing when the
* page has been finished with, no matter what it is subsequently allocated
* for (because put_page is what is used here to drop an invalid speculative
* reference).
*
* This is the interesting part of the lockless pagecache (and lockless
* get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
* has the following pattern:
* 1. find page in radix tree
* 2. conditionally increment refcount
* 3. check the page is still in pagecache (if no, goto 1)
*
* Remove-side that cares about stability of _refcount (eg. reclaim) has the
* following (with tree_lock held for write):
* A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
* B. remove page from pagecache
* C. free the page
*
* There are 2 critical interleavings that matter:
* - 2 runs before A: in this case, A sees elevated refcount and bails out
* - A runs before 2: in this case, 2 sees zero refcount and retries;
* subsequently, B will complete and 1 will find no page, causing the
* lookup to return NULL.
*
* It is possible that between 1 and 2, the page is removed then the exact same
* page is inserted into the same position in pagecache. That's OK: the
* old find_get_page using tree_lock could equally have run before or after
* such a re-insertion, depending on order that locks are granted.
*
* Lookups racing against pagecache insertion isn't a big problem: either 1
* will find the page or it will not. Likewise, the old find_get_page could run
* either before the insertion or afterwards, depending on timing.
*/
static inline int page_cache_get_speculative(struct page *page)
{
VM_BUG_ON(in_interrupt());
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
/*
* Preempt must be disabled here - we rely on rcu_read_lock doing
* this for us.
*
* Pagecache won't be truncated from interrupt context, so if we have
* found a page in the radix tree here, we have pinned its refcount by
* disabling preempt, and hence no need for the "speculative get" that
* SMP requires.
*/
VM_BUG_ON_PAGE(page_count(page) == 0, page);
page_ref_inc(page);
#else
if (unlikely(!get_page_unless_zero(page))) {
/*
* Either the page has been freed, or will be freed.
* In either case, retry here and the caller should
* do the right thing (see comments above).
*/
return 0;
}
#endif
VM_BUG_ON_PAGE(PageTail(page), page);
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 74 | 83.15% | 1 | 16.67% |
Sasha Levin | 6 | 6.74% | 1 | 16.67% |
Kirill A. Shutemov | 4 | 4.49% | 1 | 16.67% |
Paul E. McKenney | 3 | 3.37% | 1 | 16.67% |
JoonSoo Kim | 1 | 1.12% | 1 | 16.67% |
Frédéric Weisbecker | 1 | 1.12% | 1 | 16.67% |
Total | 89 | 100.00% | 6 | 100.00% |
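Editorial note: the numbered lookup-side protocol described in the comment above can be condensed into the following illustrative sketch (hypothetical function name). The real find_get_entry() in mm/filemap.c additionally deals with radix-tree slots and exceptional (shadow/DAX) entries, which are omitted here.
/* Illustrative sketch only; relies on <linux/radix-tree.h> via <linux/fs.h>. */
static struct page *example_lockless_lookup(struct address_space *mapping,
                                            pgoff_t offset)
{
        struct page *page;

        rcu_read_lock();
repeat:
        page = radix_tree_lookup(&mapping->page_tree, offset);   /* step 1 */
        if (page) {
                if (!page_cache_get_speculative(page))            /* step 2 */
                        goto repeat;
                /* step 3: is the page still at this index in the pagecache? */
                if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
                                                       offset))) {
                        put_page(page);
                        goto repeat;
                }
        }
        rcu_read_unlock();
        return page;
}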
/*
* Same as above, but add instead of inc (could just be merged)
*/
static inline int page_cache_add_speculative(struct page *page, int count)
{
VM_BUG_ON(in_interrupt());
#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
VM_BUG_ON_PAGE(page_count(page) == 0, page);
page_ref_add(page, count);
#else
if (unlikely(!page_ref_add_unless(page, count, 0)))
return 0;
#endif
VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 94 | 85.45% | 1 | 16.67% |
Sasha Levin | 6 | 5.45% | 1 | 16.67% |
JoonSoo Kim | 4 | 3.64% | 1 | 16.67% |
Kirill A. Shutemov | 4 | 3.64% | 1 | 16.67% |
Frédéric Weisbecker | 1 | 0.91% | 1 | 16.67% |
Paul E. McKenney | 1 | 0.91% | 1 | 16.67% |
Total | 110 | 100.00% | 6 | 100.00% |
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
return alloc_pages(gfp, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 15 | 75.00% | 1 | 50.00% |
Paul Jackson | 5 | 25.00% | 1 | 50.00% |
Total | 20 | 100.00% | 2 | 100.00% |
#endif
static inline struct page *page_cache_alloc(struct address_space *x)
{
return __page_cache_alloc(mapping_gfp_mask(x));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 19 | 82.61% | 1 | 33.33% |
Andrew Morton | 3 | 13.04% | 1 | 33.33% |
Nicholas Piggin | 1 | 4.35% | 1 | 33.33% |
Total | 23 | 100.00% | 3 | 100.00% |
static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 24 | 96.00% | 2 | 66.67% |
Nicholas Piggin | 1 | 4.00% | 1 | 33.33% |
Total | 25 | 100.00% | 3 | 100.00% |
static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
return mapping_gfp_mask(x) |
__GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 22 | 91.67% | 1 | 50.00% |
Michal Hocko | 2 | 8.33% | 1 | 50.00% |
Total | 24 | 100.00% | 2 | 100.00% |
typedef int filler_t(void *, struct page *);
pgoff_t page_cache_next_hole(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
#define FGP_ACCESSED 0x00000001
#define FGP_LOCK 0x00000002
#define FGP_CREAT 0x00000004
#define FGP_WRITE 0x00000008
#define FGP_NOFS 0x00000010
#define FGP_NOWAIT 0x00000020
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
int fgp_flags, gfp_t cache_gfp_mask);
/**
* find_get_page - find and get a page reference
* @mapping: the address_space to search
* @offset: the page index
*
* Looks up the page cache slot at @mapping & @offset. If there is a
* page cache page, it is returned with an increased refcount.
*
* Otherwise, %NULL is returned.
*/
static inline struct page *find_get_page(struct address_space *mapping,
pgoff_t offset)
{
return pagecache_get_page(mapping, offset, 0, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 16 | 55.17% | 1 | 16.67% |
Linus Torvalds (pre-git) | 9 | 31.03% | 2 | 33.33% |
Johannes Weiner | 2 | 6.90% | 1 | 16.67% |
Fengguang Wu | 1 | 3.45% | 1 | 16.67% |
Andrew Morton | 1 | 3.45% | 1 | 16.67% |
Total | 29 | 100.00% | 6 | 100.00% |
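Editorial note: a small usage sketch (hypothetical function name) showing the reference-count contract of find_get_page(): a non-NULL return must be balanced with put_page().
/* Illustrative sketch only. */
static bool example_page_is_uptodate(struct address_space *mapping, pgoff_t index)
{
        struct page *page = find_get_page(mapping, index);
        bool uptodate = false;

        if (page) {
                uptodate = PageUptodate(page);
                put_page(page);         /* drop the reference taken above */
        }
        return uptodate;
}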
static inline struct page *find_get_page_flags(struct address_space *mapping,
pgoff_t offset, int fgp_flags)
{
return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 20 | 62.50% | 1 | 33.33% |
Johannes Weiner | 11 | 34.38% | 1 | 33.33% |
Linus Torvalds (pre-git) | 1 | 3.12% | 1 | 33.33% |
Total | 32 | 100.00% | 3 | 100.00% |
/**
* find_lock_page - locate, pin and lock a pagecache page
* @mapping: the address_space to search
* @offset: the page index
*
* Looks up the page cache slot at @mapping & @offset. If there is a
* page cache page, it is returned locked and with an increased
* refcount.
*
* Otherwise, %NULL is returned.
*
* find_lock_page() may sleep.
*/
static inline struct page *find_lock_page(struct address_space *mapping,
pgoff_t offset)
{
return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 16 | 55.17% | 1 | 16.67% |
Linus Torvalds (pre-git) | 10 | 34.48% | 2 | 33.33% |
Johannes Weiner | 1 | 3.45% | 1 | 16.67% |
Fengguang Wu | 1 | 3.45% | 1 | 16.67% |
Andrew Morton | 1 | 3.45% | 1 | 16.67% |
Total | 29 | 100.00% | 6 | 100.00% |
/**
* find_or_create_page - locate or add a pagecache page
* @mapping: the page's address_space
* @index: the page's index into the mapping
* @gfp_mask: page allocation mode
*
* Looks up the page cache slot at @mapping & @index. If there is a
* page cache page, it is returned locked and with an increased
* refcount.
*
* If the page is not present, a new page is allocated using @gfp_mask
* and added to the page cache and the VM's LRU list. The page is
* returned locked and with an increased refcount.
*
* On memory exhaustion, %NULL is returned.
*
* find_or_create_page() may sleep, even if @gfp_mask specifies an
* atomic allocation!
*/
static inline struct page *find_or_create_page(struct address_space *mapping,
pgoff_t offset, gfp_t gfp_mask)
{
return pagecache_get_page(mapping, offset,
FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
gfp_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 21 | 58.33% | 1 | 25.00% |
Linus Torvalds | 13 | 36.11% | 1 | 25.00% |
Al Viro | 1 | 2.78% | 1 | 25.00% |
Fengguang Wu | 1 | 2.78% | 1 | 25.00% |
Total | 36 | 100.00% | 4 | 100.00% |
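Editorial note: a hedged sketch (hypothetical function name) of the locked-page contract: on success the page comes back locked and referenced, so the caller must unlock_page() and, when finished, put_page().
/* Illustrative sketch only. */
static struct page *example_get_zeroed_page(struct address_space *mapping,
                                            pgoff_t index)
{
        struct page *page;

        page = find_or_create_page(mapping, index,
                                   mapping_gfp_constraint(mapping, GFP_KERNEL));
        if (!page)
                return NULL;
        if (!PageUptodate(page)) {
                zero_user(page, 0, PAGE_SIZE);  /* from <linux/highmem.h> */
                SetPageUptodate(page);
        }
        unlock_page(page);      /* still holding the page reference */
        return page;
}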
/**
* grab_cache_page_nowait - returns locked page at given index in given cache
* @mapping: target address_space
* @index: the page index
*
* Same as grab_cache_page(), but do not wait if the page is unavailable.
* This is intended for speculative data generators, where the data can
* be regenerated if the page couldn't be grabbed. This routine should
* be safe to call while holding the lock for another page.
*
* Clear __GFP_FS when allocating the page to avoid recursion into the fs
* and deadlock against the caller's locked page.
*/
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
pgoff_t index)
{
return pagecache_get_page(mapping, index,
FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
mapping_gfp_mask(mapping));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 38 | 100.00% | 1 | 100.00% |
Total | 38 | 100.00% | 1 | 100.00% |
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
unsigned int nr_entries, struct page **entries,
pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
int tag, unsigned int nr_entries,
struct page **entries, pgoff_t *indices);
struct page *grab_cache_page_write_begin(struct address_space *mapping,
pgoff_t index, unsigned flags);
/*
* Returns locked page at given index in given cache, creating it if needed.
*/
static inline struct page *grab_cache_page(struct address_space *mapping,
pgoff_t index)
{
return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 14 | 46.67% | 1 | 12.50% |
Andrew Morton | 9 | 30.00% | 2 | 25.00% |
Linus Torvalds (pre-git) | 5 | 16.67% | 3 | 37.50% |
Fengguang Wu | 1 | 3.33% | 1 | 12.50% |
Linus Torvalds | 1 | 3.33% | 1 | 12.50% |
Total | 30 | 100.00% | 8 | 100.00% |
extern struct page * read_cache_page(struct address_space *mapping,
pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
struct list_head *pages, filler_t *filler, void *data);
static inline struct page *read_mapping_page(struct address_space *mapping,
pgoff_t index, void *data)
{
filler_t *filler = (filler_t *)mapping->a_ops->readpage;
return read_cache_page(mapping, index, filler, data);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pekka J Enberg | 46 | 97.87% | 1 | 50.00% |
Fengguang Wu | 1 | 2.13% | 1 | 50.00% |
Total | 47 | 100.00% | 2 | 100.00% |
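Editorial note: read_mapping_page() returns either an uptodate (but unlocked) page or an ERR_PTR(); the hypothetical helper below shows the usual error handling and reference drop, assuming the standard kmap/err headers are available.
/* Illustrative sketch only. */
static int example_read_first_bytes(struct address_space *mapping,
                                    void *buf, size_t len)
{
        struct page *page = read_mapping_page(mapping, 0, NULL);
        void *kaddr;

        if (IS_ERR(page))
                return PTR_ERR(page);
        kaddr = kmap_atomic(page);
        memcpy(buf, kaddr, min_t(size_t, len, PAGE_SIZE));
        kunmap_atomic(kaddr);
        put_page(page);
        return 0;
}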
/*
* Get the page's index within the radix-tree.
* (TODO: remove once hugetlb pages have ->index in units of PAGE_SIZE)
*/
static inline pgoff_t page_to_index(struct page *page)
{
pgoff_t pgoff;
if (likely(!PageTransTail(page)))
return page->index;
/*
* We don't initialize ->index for tail pages: calculate based on
* head page
*/
pgoff = compound_head(page)->index;
pgoff += page - compound_head(page);
return pgoff;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill A. Shutemov | 35 | 66.04% | 2 | 66.67% |
Naoya Horiguchi | 18 | 33.96% | 1 | 33.33% |
Total | 53 | 100.00% | 3 | 100.00% |
/*
* Get the page's offset into its file, in units of PAGE_SIZE.
* (TODO: hugepages should have ->index in units of PAGE_SIZE)
*/
static inline pgoff_t page_to_pgoff(struct page *page)
{
if (unlikely(PageHeadHuge(page)))
return page->index << compound_order(page);
return page_to_index(page);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill A. Shutemov | 38 | 100.00% | 1 | 100.00% |
Total | 38 | 100.00% | 1 | 100.00% |
/*
* Return byte-offset into filesystem object for page.
*/
static inline loff_t page_offset(struct page *page)
{
return ((loff_t)page->index) << PAGE_SHIFT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 23 | 95.83% | 1 | 50.00% |
Kirill A. Shutemov | 1 | 4.17% | 1 | 50.00% |
Total | 24 | 100.00% | 2 | 100.00% |
static inline loff_t page_file_offset(struct page *page)
{
return ((loff_t)page_index(page)) << PAGE_SHIFT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 23 | 92.00% | 1 | 33.33% |
Kirill A. Shutemov | 1 | 4.00% | 1 | 33.33% |
Huang Ying | 1 | 4.00% | 1 | 33.33% |
Total | 25 | 100.00% | 3 | 100.00% |
extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
unsigned long address);
static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
unsigned long address)
{
pgoff_t pgoff;
if (unlikely(is_vm_hugetlb_page(vma)))
return linear_hugepage_index(vma, address);
pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
pgoff += vma->vm_pgoff;
return pgoff;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 29 | 50.00% | 2 | 50.00% |
Naoya Horiguchi | 22 | 37.93% | 1 | 25.00% |
Linus Torvalds | 7 | 12.07% | 1 | 25.00% |
Total | 58 | 100.00% | 4 | 100.00% |
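Editorial note: a worked example of the index arithmetic above, assuming 4 KiB pages (PAGE_SHIFT == 12) and illustrative addresses.
/*
 * For a non-hugetlb VMA with vm_start == 0x7f0000000000 and vm_pgoff == 16,
 * the address 0x7f0000003000 maps to file page index
 * ((0x7f0000003000 - 0x7f0000000000) >> 12) + 16 = 3 + 16 = 19.
 */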
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags);
extern void unlock_page(struct page *page);
static inline int trylock_page(struct page *page)
{
page = compound_head(page);
return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 32 | 88.89% | 2 | 66.67% |
Kirill A. Shutemov | 4 | 11.11% | 1 | 33.33% |
Total | 36 | 100.00% | 3 | 100.00% |
/*
* lock_page may only be called if we have the page's inode pinned.
*/
static inline void lock_page(struct page *page)
{
might_sleep();
if (!trylock_page(page))
__lock_page(page);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 23 | 82.14% | 1 | 33.33% |
Ingo Molnar | 3 | 10.71% | 1 | 33.33% |
Nicholas Piggin | 2 | 7.14% | 1 | 33.33% |
Total | 28 | 100.00% | 3 | 100.00% |
/*
* lock_page_killable is like lock_page but can be interrupted by fatal
* signals. It returns 0 if it locked the page and -EINTR if it was
* killed while waiting.
*/
static inline int lock_page_killable(struct page *page)
{
might_sleep();
if (!trylock_page(page))
return __lock_page_killable(page);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 30 | 93.75% | 1 | 50.00% |
Nicholas Piggin | 2 | 6.25% | 1 | 50.00% |
Total | 32 | 100.00% | 2 | 100.00% |
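Editorial note: a minimal sketch (hypothetical function name) of handling the -EINTR return from lock_page_killable().
/* Illustrative sketch only. */
static int example_with_locked_page(struct page *page)
{
        int err = lock_page_killable(page);

        if (err)
                return err;     /* -EINTR: a fatal signal arrived while waiting */
        /* ... operate on the locked page ... */
        unlock_page(page);
        return 0;
}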
/*
* lock_page_or_retry - Lock the page, unless this would block and the
* caller indicated that it can handle a retry.
*
* Return value and mmap_sem implications depend on flags; see
* __lock_page_or_retry().
*/
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags)
{
might_sleep();
return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michel Lespinasse | 39 | 100.00% | 1 | 100.00% |
Total | 39 | 100.00% | 1 | 100.00% |
/*
* This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
* and should not be used directly.
*/
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
/*
* Wait for a page to be unlocked.
*
* This must be called with the caller "holding" the page,
* i.e. with an elevated page reference count (_refcount) so that the
* page won't go away during the wait.
*/
static inline void wait_on_page_locked(struct page *page)
{
if (PageLocked(page))
wait_on_page_bit(compound_head(page), PG_locked);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 18 | 62.07% | 1 | 50.00% |
Nicholas Piggin | 11 | 37.93% | 1 | 50.00% |
Total | 29 | 100.00% | 2 | 100.00% |
static inline int wait_on_page_locked_killable(struct page *page)
{
if (!PageLocked(page))
return 0;
return wait_on_page_bit_killable(compound_head(page), PG_locked);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 19 | 55.88% | 2 | 33.33% |
Nicholas Piggin | 9 | 26.47% | 1 | 16.67% |
Kirill A. Shutemov | 3 | 8.82% | 1 | 16.67% |
Christoph Hellwig | 2 | 5.88% | 1 | 16.67% |
Linus Torvalds | 1 | 2.94% | 1 | 16.67% |
Total | 34 | 100.00% | 6 | 100.00% |
/*
* Wait for a page to complete writeback
*/
static inline void wait_on_page_writeback(struct page *page)
{
if (PageWriteback(page))
wait_on_page_bit(page, PG_writeback);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 17 | 65.38% | 1 | 33.33% |
Andrew Morton | 8 | 30.77% | 1 | 33.33% |
Linus Torvalds (pre-git) | 1 | 3.85% | 1 | 33.33% |
Total | 26 | 100.00% | 3 | 100.00% |
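Editorial note: a hedged sketch (hypothetical function name) of the common lock-then-wait sequence used to quiesce a page before modifying it in place; holding the page lock keeps new writeback from starting while we wait.
/* Illustrative sketch only. */
static void example_stabilize_page(struct page *page)
{
        lock_page(page);                /* block truncation and new I/O */
        wait_on_page_writeback(page);   /* drain any writeback in flight */
        /* ... safe to redirty or rewrite the page contents here ... */
        unlock_page(page);
}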
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);
void page_endio(struct page *page, bool is_write, int err);
/*
* Add an arbitrary waiter to a page's wait queue
*/
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
/*
* Fault everything in given userspace address range in.
*/
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
return 0;
if (unlikely(uaddr > end))
return -EFAULT;
/*
* Writing zeroes into userspace here is OK, because we know that if
* the zero gets there, we'll be overwriting it.
*/
do {
if (unlikely(__put_user(0, uaddr) != 0))
return -EFAULT;
uaddr += PAGE_SIZE;
} while (uaddr <= end);
/* Check whether the range spilled into the next page. */
if (((unsigned long)uaddr & PAGE_MASK) ==
((unsigned long)end & PAGE_MASK))
return __put_user(0, end);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Vetter | 75 | 63.56% | 1 | 25.00% |
Al Viro | 29 | 24.58% | 1 | 25.00% |
Andrew Morton | 13 | 11.02% | 1 | 25.00% |
Linus Torvalds | 1 | 0.85% | 1 | 25.00% |
Total | 118 | 100.00% | 4 | 100.00% |
static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
volatile char c;
const char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
return 0;
if (unlikely(uaddr > end))
return -EFAULT;
do {
if (unlikely(__get_user(c, uaddr) != 0))
return -EFAULT;
uaddr += PAGE_SIZE;
} while (uaddr <= end);
/* Check whether the range spilled into the next page. */
if (((unsigned long)uaddr & PAGE_MASK) ==
((unsigned long)end & PAGE_MASK)) {
return __get_user(c, end);
}
(void)c;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Vetter | 96 | 73.85% | 1 | 25.00% |
Al Viro | 29 | 22.31% | 2 | 50.00% |
Dave Chinner | 5 | 3.85% | 1 | 25.00% |
Total | 130 | 100.00% | 4 | 100.00% |
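Editorial note: a hedged sketch (hypothetical function name) of the classic write-path use of fault_in_pages_readable(): touch the user buffer before any page locks are taken, so that a later atomic copy from that buffer cannot fault on a page we have locked ourselves.
/* Illustrative sketch only. */
static int example_prefault_user_buffer(const char __user *buf, int bytes)
{
        if (unlikely(fault_in_pages_readable(buf, bytes)))
                return -EFAULT;
        return 0;
}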
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
/*
* Like add_to_page_cache_locked, but used to add newly allocated pages:
* the page is new, so we can just run __SetPageLocked() against it.
*/
static inline int add_to_page_cache(struct page *page,
struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
int error;
__SetPageLocked(page);
error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
if (unlikely(error))
__ClearPageLocked(page);
return error;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 57 | 96.61% | 1 | 50.00% |
Kirill A. Shutemov | 2 | 3.39% | 1 | 50.00% |
Total | 59 | 100.00% | 2 | 100.00% |
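Editorial note: a hedged sketch (hypothetical function name) combining __page_cache_alloc() with add_to_page_cache_lru(); on success the new page is in the pagecache and on the LRU, and is returned locked with an elevated refcount.
/* Illustrative sketch only; assumes <linux/err.h>. */
static struct page *example_add_new_page(struct address_space *mapping,
                                         pgoff_t index, gfp_t gfp)
{
        struct page *page = __page_cache_alloc(gfp);
        int err;

        if (!page)
                return ERR_PTR(-ENOMEM);
        err = add_to_page_cache_lru(page, mapping, index, gfp);
        if (err) {
                put_page(page);         /* -EEXIST means another thread won the race */
                return ERR_PTR(err);
        }
        return page;
}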
static inline unsigned long dir_pages(struct inode *inode)
{
return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
PAGE_SHIFT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fabian Frederick | 28 | 93.33% | 1 | 50.00% |
Kirill A. Shutemov | 2 | 6.67% | 1 | 50.00% |
Total | 30 | 100.00% | 2 | 100.00% |
#endif /* _LINUX_PAGEMAP_H */
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 380 | 16.96% | 10 | 9.17% |
Andrew Morton | 236 | 10.53% | 13 | 11.93% |
Mel Gorman | 196 | 8.75% | 3 | 2.75% |
Daniel Vetter | 171 | 7.63% | 1 | 0.92% |
Johannes Weiner | 138 | 6.16% | 3 | 2.75% |
Kirill A. Shutemov | 97 | 4.33% | 5 | 4.59% |
Christoph Hellwig | 95 | 4.24% | 3 | 2.75% |
Lee Schermerhorn | 94 | 4.19% | 3 | 2.75% |
Linus Torvalds (pre-git) | 74 | 3.30% | 13 | 11.93% |
Linus Torvalds | 65 | 2.90% | 9 | 8.26% |
Al Viro | 62 | 2.77% | 5 | 4.59% |
Michel Lespinasse | 58 | 2.59% | 1 | 0.92% |
Naoya Horiguchi | 57 | 2.54% | 3 | 2.75% |
Guillaume Chazarain | 52 | 2.32% | 1 | 0.92% |
Huang Ying | 52 | 2.32% | 2 | 1.83% |
Matthew Wilcox | 49 | 2.19% | 2 | 1.83% |
Pekka J Enberg | 46 | 2.05% | 1 | 0.92% |
Michal Hocko | 29 | 1.29% | 3 | 2.75% |
Ross Zwisler | 29 | 1.29% | 1 | 0.92% |
Fabian Frederick | 28 | 1.25% | 1 | 0.92% |
Fengguang Wu | 28 | 1.25% | 2 | 1.83% |
Harvey Harrison | 25 | 1.12% | 1 | 0.92% |
Jens Axboe | 24 | 1.07% | 2 | 1.83% |
Trond Myklebust | 21 | 0.94% | 1 | 0.92% |
Neil Brown | 18 | 0.80% | 1 | 0.92% |
Miklos Szeredi | 17 | 0.76% | 1 | 0.92% |
David Howells | 15 | 0.67% | 1 | 0.92% |
Paul Jackson | 15 | 0.67% | 1 | 0.92% |
Motohiro Kosaki | 13 | 0.58% | 1 | 0.92% |
Sasha Levin | 12 | 0.54% | 1 | 0.92% |
MinChan Kim | 11 | 0.49% | 2 | 1.83% |
Darrick J. Wong | 9 | 0.40% | 1 | 0.92% |
JoonSoo Kim | 6 | 0.27% | 2 | 1.83% |
Dave Chinner | 5 | 0.22% | 1 | 0.92% |
Paul E. McKenney | 4 | 0.18% | 2 | 1.83% |
Ingo Molnar | 3 | 0.13% | 1 | 0.92% |
Frédéric Weisbecker | 2 | 0.09% | 1 | 0.92% |
Andrea Arcangeli | 2 | 0.09% | 1 | 0.92% |
Paul Cassella | 1 | 0.04% | 1 | 0.92% |
Konstantin Khlebnikov | 1 | 0.04% | 1 | 0.92% |
Randy Dunlap | 1 | 0.04% | 1 | 0.92% |
Total | 2241 | 100.00% | 109 | 100.00% |