cregit-Linux: how code gets into the kernel

Release 4.10 fs/fscache/page.c

Directory: fs/fscache
/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */


#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */

/**
 * __fscache_check_page_write - see if a page has a store outstanding
 * @cookie: the cookie whose store radix tree is consulted
 * @page: the netfs page to look up (keyed by page->index)
 *
 * Returns true if the page still has an entry in the cookie's radix tree of
 * outstanding stores (i.e. a write to the cache is pending or in progress).
 * The lookup is done under the RCU read lock only; the answer may be stale
 * by the time the caller acts on it.
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *entry;

	rcu_read_lock();
	entry = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();

	return entry != NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
david howellsdavid howells44100.00%1100.00%
Total44100.00%1100.00%

EXPORT_SYMBOL(__fscache_check_page_write); /* * wait for a page to finish being written to the cache */
/**
 * __fscache_wait_on_page_write - wait for a page's cache write to finish
 * @cookie: the cookie the page is being stored under
 * @page: the netfs page being written to the cache
 *
 * Sleeps (uninterruptibly, no timeout) on the cookie's bit-0 waitqueue until
 * __fscache_check_page_write() reports that the page is no longer in the
 * cookie's store radix tree.
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *waitq = bit_waitqueue(&cookie->flags, 0);

	wait_event(*waitq, !__fscache_check_page_write(cookie, page));
}

Contributors

PersonTokensPropCommitsCommitProp
david howellsdavid howells43100.00%1100.00%
Total43100.00%1100.00%

EXPORT_SYMBOL(__fscache_wait_on_page_write); /* * wait for a page to finish being written to the cache. Put a timeout here * since we might be called recursively via parent fs. */
/*
 * Wait for a page to finish being written to the cache, but give up after
 * HZ jiffies.  The timeout matters because this can be reached recursively
 * via a parent filesystem, where waiting forever could deadlock.
 *
 * Returns nonzero (true) if the page's store completed before the timeout.
 */
static bool release_page_wait_timeout(struct fscache_cookie *cookie,
				      struct page *page)
{
	wait_queue_head_t *waitq = bit_waitqueue(&cookie->flags, 0);

	return wait_event_timeout(*waitq,
				  !__fscache_check_page_write(cookie, page),
				  HZ);
}

Contributors

PersonTokensPropCommitsCommitProp
milosz tanskimilosz tanski47100.00%1100.00%
Total47100.00%1100.00%

/* * decide whether a page can be released, possibly by cancelling a store to it * - we're allowed to sleep if __GFP_DIRECT_RECLAIM is flagged */
/**
 * __fscache_maybe_release_page - decide whether a page can be released
 * @cookie: the cookie the page may be stored under
 * @page: the netfs page vmscan wants to release
 * @gfp: reclaim flags governing whether we may sleep
 *
 * Try to let the VM release a page, cancelling any pending store to the
 * cache if one exists.  Returns true if the page was uncached and may be
 * released, false if it is busy being written out and the caller may not
 * sleep for it.
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

try_again:
	/* Fast path: no store entry at all means nothing to cancel. */
	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		put_page(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* We will wait here if we're allowed to, but that could deadlock the
	 * allocator as the work threads writing to the cache may all end up
	 * sleeping on memory allocation, so we may need to impose a timeout
	 * too. */
	if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
		fscache_stat(&fscache_n_store_vmscan_busy);
		return false;
	}

	fscache_stat(&fscache_n_store_vmscan_wait);
	if (!release_page_wait_timeout(cookie, page))
		_debug("fscache writeout timeout page: %p{%lx}",
		       page, page->index);

	/* Only wait once: strip the reclaim flag before retrying so the next
	 * pass through page_busy returns false instead of sleeping again. */
	gfp &= ~__GFP_DIRECT_RECLAIM;
	goto try_again;
}

Contributors

PersonTokensPropCommitsCommitProp
david howellsdavid howells28494.04%350.00%
milosz tanskimilosz tanski154.97%116.67%
mel gormanmel gorman20.66%116.67%
kirill a. shutemovkirill a. shutemov10.33%116.67%
Total302100.00%6100.00%

EXPORT_SYMBOL(__fscache_maybe_release_page); /* * note that a page has finished being written to the cache */
static void fscache_end_page_write(struct fscache_object *object, struct page *page) { struct fscache_cookie *cookie; struct page *xpage = NULL; spin_lock(&object->lock); cookie = object->cookie; if (cookie) { /* delete the page from the tree if it is now no longer * pending */ spin_lock(&cookie->stores_lock); radix_tree_tag_clear(&cookie->stores, page->index, FSCACHE_COOKIE_STORING_TAG); if (!radix_tree_tag_get(&cookie->stores, page->index, FSCACHE_COOKIE_PENDING_TAG)) { fscache_stat(&fscache_n_store_radix_deletes); xpage = radix_tree_delete(&cookie->stores, page->index); } spin_unlock(&cookie->stores_lock); wake_up_bit(&cookie->flags, 0); } spin_unlock(&object->lock); if (xpage) put_page(xpage); }

Contributors

PersonTokensPropCommitsCommitProp
david howellsdavid howells14499.31%480.00%
kirill a. shutemovkirill a. shutemov10.69%120.00%
Total145100.00%5100.00%

/* * actually apply the changed attributes to a cache object */
static void fscache_attr_changed_op(struct fscache_operation *op) { struct fscache_object *object = op->object; int ret; _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id); fscache_stat(&fscache_n_attr_changed_calls); if (fscache_object_is_active(object)) { fscache_stat(&fscache_n_cop_attr_changed); ret = object->cache->ops->attr_changed(object); fscache_stat_d(&fscache_n_cop_attr_changed); if (ret < 0) fscache_abort_object(object); } fscache_op_complete(op, true); _leave(""); }

Contributors

PersonTokensPropCommitsCommitProp
david howellsdavid howells99100.00%5100.00%
Total99100.00%5100.00%

/* * notification that the attributes on an object have changed */
int __fscache_attr_changed(struct fscache_cookie *cookie) { struct fscache_operation *op; struct fscache_object *object; bool wake_cookie = false; _enter("%p", cookie); ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); fscache_stat(&fscache_n_attr_changed); op = kzalloc(sizeof(*op), GFP_KERNEL); if (!op) { fscache_stat(&fscache_n_attr_changed_nomem); _leave(" = -ENOMEM"); return -ENOMEM; } fscache_operation_init(op, fscache_attr_changed_op, NULL, NULL); op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE) | (1 << FSCACHE_OP_UNUSE_COOKIE); spin_lock(&cookie->lock); if (!fscache_cookie_enabled(cookie) || hlist_empty(&cookie->backing_objects)) goto nobufs; object = hlist_entry(cookie->backing_objects.first, struct fscache_object, cookie_link); __fscache_use_cookie(cookie); if (fscache_submit_exclusive_op(object, op) < 0) goto nobufs_dec; spin_unlock(&cookie->lock); fscache_stat(&fscache_n_attr_changed_ok); fscache_put_operation(op); _leave(" = 0"); return 0; nobufs_dec: wake_cookie = __fscache_unuse_cookie(cookie); nobufs: spin_unlock(&cookie->lock); fscache_put_operation(op); if (wake_cookie) __fscache_wake_unused_cookie(cookie); fscache_stat(&fscache_n_attr_changed_nobufs); _leave(" = %d", -ENOBUFS); return -ENOBUFS; }

Contributors

PersonTokensPropCommitsCommitProp
david howellsdavid howells24696.47%571.43%
milosz tanskimilosz tanski62.35%114.29%
tejun heotejun heo31.18%114.29%
Total255100.00%7100.00%

EXPORT_SYMBOL(__fscache_attr_changed); /* * Handle cancellation of a pending retrieval op */
/*
 * Handle cancellation of a pending retrieval op: zero the outstanding page
 * count so the release path's assertion on n_pages is satisfied.
 */
static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	atomic_set(&op->n_pages, 0);
}

Contributors

PersonTokensPropCommitsCommitProp
david howellsdavid howells36100.00%1100.00%
Total36100.00%1100.00%

/* * release a retrieval op reference */
static void fscache_release_retrieval_op(struct fscache_operation *_op) { struct fscache_retrieval *op = container_of(_op, struct fscache_retrieval, op); _enter("{OP%x}", op->op.debug_id); ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED, atomic_read(&op->n_pages), ==, 0); fscache_hist(fscache_retrieval_histogram, op->start_time); if (op->context) fscache_put_context(op->cookie, op->context); _leave(""); }

Contributors

PersonTokensPropCommitsCommitProp
david howellsdavid howells91100.00%4100.00%
Total91100.00%4100.00%

/* * allocate a retrieval op */
/*
 * Allocate and initialise a retrieval operation.
 *
 * The op is set up to run in the caller's thread (FSCACHE_OP_MYTHREAD) and
 * starts in the WAITING state.  Returns NULL on allocation failure (the
 * nomem stat is bumped); otherwise returns the new op with one reference.
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct fscache_cookie *cookie,
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, NULL,
			       fscache_do_cancel_retrieval,
			       fscache_release_retrieval_op);
	op->op.flags	= FSCACHE_OP_MYTHREAD |
		(1UL << FSCACHE_OP_WAITING) |
		(1UL << FSCACHE_OP_UNUSE_COOKIE);
	op->cookie	= cookie;
	op->mapping	= mapping;
	op->end_io_func	= end_io_func;
	op->context	= context;
	op->start_time	= jiffies;
	INIT_LIST_HEAD(&op->to_do);

	/* Pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure. */
	if (context)
		fscache_get_context(op->cookie, context);
	return op;
}

Contributors

PersonTokensPropCommitsCommitProp
david howellsdavid howells14798.66%480.00%
tejun heotejun heo21.34%120.00%
Total149100.00%5100.00%

/* * wait for a deferred lookup to complete */
int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie) { unsigned long jif; _enter(""); if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) { _leave(" = 0 [imm]"); return 0; } fscache_stat(&fscache_n_retrievals_wait); jif = jiffies; if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP, TASK_INTERRUPTIBLE) != 0) { fscache_stat(&fscache_n_retrievals_intr); _leave(" = -ERESTARTSYS"); return -ERESTARTSYS; } ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)); smp_rmb(); fscache_hist(fscache_retrieval_delay_histogram, jif); _leave(" = 0 [dly]"); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
david howellsdavid howells117100.00%1100.00%
Total117100.00%1100.00%

/* * wait for an object to become active (or dead) */
/*
 * Wait for a submitted operation to become active (or for its object to die).
 *
 * @stat_op_waits / @stat_object_dead are optional stat counters, bumped when
 * a wait happens or the object turns out to be dead respectively.
 *
 * Returns 0 once the op is active, -ERESTARTSYS if interrupted and
 * successfully cancelled, or -ENOBUFS if the op was cancelled elsewhere or
 * the object is dying/broken.
 */
int fscache_wait_for_operation_activation(struct fscache_object *object,
					  struct fscache_operation *op,
					  atomic_t *stat_op_waits,
					  atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
		goto check_if_dead;

	_debug(">>> WT");
	if (stat_op_waits)
		fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			TASK_INTERRUPTIBLE) != 0) {
		ret = fscache_cancel_op(op, false);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			    TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (op->state == FSCACHE_OP_ST_CANCELLED) {
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [cancelled]");
		return -ENOBUFS;
	}
	if (unlikely(fscache_object_is_dying(object) ||
		     fscache_cache_is_broken(object))) {
		enum fscache_operation_state state = op->state;

		fscache_cancel_op(op, true);
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [obj dead %d]", state);
		return -ENOBUFS;
	}
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
david howellsdavid howells196100.00%8100.00%
Total196100.00%8100.00%

/* * read a page from the cache or allocate a block in which to store it * - we return: * -ENOMEM - out of memory, nothing done * -ERESTARTSYS - interrupted * -ENOBUFS - no backing object available in which to cache the block * -ENODATA - no data available in the backing object for this block * 0 - dispatched a read - it'll call end_io_func() when finished */
/**
 * __fscache_read_or_alloc_page - read a page from the cache or reserve a block
 * @cookie: the cookie of the object to read from
 * @page: the netfs page to fill or reserve space for
 * @end_io_func: callback invoked when the read completes
 * @context: netfs read context, pinned for the op's lifetime
 * @gfp: allocation flags for the cache operation
 *
 * Returns:
 *   -ENOMEM	  - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	  - no backing object available in which to cache the block
 *   -ENODATA	  - no data available in the backing object for this block
 *   0		  - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping,
				     end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	atomic_set(&op->n_pages, 1);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_put_retrieval(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

Contributors

PersonTokensPropCommitsCommitProp
david howellsdavid howells541100.00%14100.00%
Total541100.00%14100.00%

EXPORT_SYMBOL(__fscache_read_or_alloc_page); /* * read a list of page from the cache or allocate a block in which to store * them * - we return: * -ENOMEM - out of memory, some pages may be being read * -ERESTARTSYS - interrupted, some pages may be being read * -ENOBUFS - no backing object or space available in which to cache any * pages not being read * -ENODATA - no data available in the backing object for some or all of * the pages * 0 - dispatched a read on all pages * * end_io_func() will be called for each page read from the cache as it is * finishes being read * * any pages for which a read is dispatched will be removed from pages and * nr_pages */
/**
 * __fscache_read_or_alloc_pages - read several pages from the cache
 * @cookie: the cookie of the object to read from
 * @mapping: the address space the pages will be attached to
 * @pages: list of pages to read; pages dispatched are removed from it
 * @nr_pages: in/out count of pages remaining on @pages
 * @end_io_func: callback invoked per page as each read finishes
 * @context: netfs read context, pinned for the op's lifetime
 * @gfp: allocation flags for the cache operation
 *
 * Returns:
 *   -ENOMEM	  - out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS	  - no backing object or space available for uncached pages
 *   -ENODATA	  - no data available for some or all of the pages
 *   0		  - dispatched a read on all pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, *nr_pages);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

Contributors

PersonTokensPropCommitsCommitProp
david howellsdavid howells533100.00%13100.00%
Total533100.00%13100.00%

EXPORT_SYMBOL(__fscache_read_or_alloc_pages); /* * allocate a block in the cache on which to store a page * - we return: * -ENOMEM - out of memory, nothing done * -ERESTARTSYS - interrupted * -ENOBUFS - no backing object available in which to cache the block * 0 - block allocated */
int __fscache_alloc_page(struct fscache_cookie *cookie, struct page *page, gfp_t gfp) { struct fscache_retrieval *op; struct fscache_object *object; bool wake_cookie = false; int ret; _enter("%p,%p,,,", cookie, page); fscache_stat(&fscache_n_allocs); if (hlist_empty(&cookie->backing_objects)) goto nobufs; ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); ASSERTCMP(page, !=, NULL); if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) { _leave(" = -ENOBUFS [invalidating]"); return -ENOBUFS; } if (fscache_wait_for_deferred_lookup(cookie) < 0) return -ERESTARTSYS; op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL); if (!op) return -ENOMEM; atomic_set(&op->n_pages, 1); spin_lock(&cookie->lock); if (!fscache_cookie_enabled(cookie) || hlist_empty(&cookie->backing_objects)) goto nobufs_unlock; object = hlist_entry(cookie->backing_objects.first, struct fscache_object, cookie_link); __fscache_use_cookie(cookie); if (fscache_submit_op(object, &op->op) < 0) goto nobufs_unlock_dec; spin_unlock(