Release 4.7 fs/inode.c
/*
* (C) 1997 Linus Torvalds
* (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
*/
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <trace/events/writeback.h>
#include "internal.h"
/*
* Inode locking rules:
*
* inode->i_lock protects:
* inode->i_state, inode->i_hash, __iget()
* Inode LRU list locks protect:
* inode->i_sb->s_inode_lru, inode->i_lru
* inode->i_sb->s_inode_list_lock protects:
* inode->i_sb->s_inodes, inode->i_sb_list
* bdi->wb.list_lock protects:
* bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
* inode_hash_lock protects:
* inode_hashtable, inode->i_hash
*
* Lock ordering:
*
* inode->i_sb->s_inode_list_lock
* inode->i_lock
* Inode LRU list locks
*
* bdi->wb.list_lock
* inode->i_lock
*
* inode_hash_lock
* inode->i_sb->s_inode_list_lock
* inode->i_lock
*
* iunique_lock
* inode_hash_lock
*/
static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
/*
* Empty aops. Can be used for the cases where the user does not
* define any of the address_space operations.
*/
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);
/*
* Statistics gathering..
*/
struct inodes_stat_t inodes_stat;
static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);
static struct kmem_cache *inode_cachep __read_mostly;
static long get_nr_inodes(void)
{
int i;
long sum = 0;
for_each_possible_cpu(i)
sum += per_cpu(nr_inodes, i);
return sum < 0 ? 0 : sum;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
nick piggin | nick piggin | 25 | 65.79% | 1 | 33.33% |
dave chinner | dave chinner | 11 | 28.95% | 1 | 33.33% |
glauber costa | glauber costa | 2 | 5.26% | 1 | 33.33% |
| Total | 38 | 100.00% | 3 | 100.00% |
static inline long get_nr_inodes_unused(void)
{
int i;
long sum = 0;
for_each_possible_cpu(i)
sum += per_cpu(nr_unused, i);
return sum < 0 ? 0 : sum;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
dave chinner | dave chinner | 36 | 92.31% | 2 | 50.00% |
glauber costa | glauber costa | 2 | 5.13% | 1 | 25.00% |
nick piggin | nick piggin | 1 | 2.56% | 1 | 25.00% |
| Total | 39 | 100.00% | 4 | 100.00% |
long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long dirty = get_nr_inodes() - get_nr_inodes_unused();

	/* Both inputs are racy snapshots, so the difference may go negative. */
	return dirty > 0 ? dirty : 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
dave chinner | dave chinner | 23 | 88.46% | 1 | 33.33% |
glauber costa | glauber costa | 2 | 7.69% | 1 | 33.33% |
nick piggin | nick piggin | 1 | 3.85% | 1 | 33.33% |
| Total | 26 | 100.00% | 3 | 100.00% |
/*
* Handle nr_inode sysctl
*/
#ifdef CONFIG_SYSCTL
/*
 * sysctl handler for fs.inode-nr / fs.inode-state: refresh the global
 * inodes_stat snapshot from the per-cpu counters, then let the generic
 * unsigned-long handler do the actual read/write of the table.
 */
int proc_nr_inodes(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
dave chinner | dave chinner | 52 | 96.30% | 2 | 50.00% |
joe perches | joe perches | 1 | 1.85% | 1 | 25.00% |
glauber costa | glauber costa | 1 | 1.85% | 1 | 25.00% |
| Total | 54 | 100.00% | 4 | 100.00% |
#endif
/*
 * Default ->open() installed by inode_init_always() (via no_open_fops):
 * refuse to open an inode whose filesystem never set real file operations.
 */
static int no_open(struct inode *inode, struct file *file)
{
	return -ENXIO;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
al viro | al viro | 20 | 100.00% | 1 | 100.00% |
| Total | 20 | 100.00% | 1 | 100.00% |
/**
* inode_init_always - perform inode structure intialisation
* @sb: superblock inode belongs to
* @inode: inode to initialise
*
* These are initializations that need to be done on every inode
* allocation as the fields are not initialised by slab allocation.
*/
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	/* Until the fs installs real ops, opening this inode yields -ENXIO. */
	static const struct file_operations no_open_fops = {.open = no_open};
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	/* The new inode starts with one reference, owned by the caller. */
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &no_open_fops;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_link = NULL;
	inode->i_dir_seq = 0;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

#ifdef CONFIG_CGROUP_WRITEBACK
	/* Reset foreign-writeback detection state for cgroup writeback. */
	inode->i_wb_frn_winner = 0;
	inode->i_wb_frn_avg_time = 0;
	inode->i_wb_frn_history = 0;
#endif

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	/* Per-fs-type lockdep classes, so lockdep can tell filesystems apart. */
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	init_rwsem(&inode->i_rwsem);
	lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	/* Initialise the embedded address_space for this inode's page cache. */
	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->writeback_index = 0;
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;
	this_cpu_inc(nr_inodes);

	return 0;
out:
	/*
	 * Only security_inode_alloc() can fail above; its error is flattened
	 * to -ENOMEM here regardless of what it actually returned.
	 */
	return -ENOMEM;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
linus torvalds | linus torvalds | 133 | 33.67% | 2 | 5.13% |
al viro | al viro | 62 | 15.70% | 10 | 25.64% |
peter zijlstra | peter zijlstra | 50 | 12.66% | 1 | 2.56% |
andrew morton | andrew morton | 41 | 10.38% | 5 | 12.82% |
tahsin erdogan | tahsin erdogan | 23 | 5.82% | 1 | 2.56% |
eric paris | eric paris | 11 | 2.78% | 1 | 2.56% |
david herrmann | david herrmann | 10 | 2.53% | 1 | 2.56% |
christoph hellwig | christoph hellwig | 10 | 2.53% | 3 | 7.69% |
eric w. biederman | eric w. biederman | 8 | 2.03% | 1 | 2.56% |
david chinner | david chinner | 8 | 2.03% | 1 | 2.56% |
chris wright | chris wright | 6 | 1.52% | 1 | 2.56% |
jeff layton | jeff layton | 6 | 1.52% | 1 | 2.56% |
jan kara | jan kara | 6 | 1.52% | 1 | 2.56% |
mimi zohar | mimi zohar | 5 | 1.27% | 1 | 2.56% |
chris mason | chris mason | 4 | 1.01% | 1 | 2.56% |
dave chinner | dave chinner | 4 | 1.01% | 1 | 2.56% |
theodore tso | theodore tso | 2 | 0.51% | 1 | 2.56% |
arjan van de ven | arjan van de ven | 1 | 0.25% | 1 | 2.56% |
rafael aquini | rafael aquini | 1 | 0.25% | 1 | 2.56% |
hugh dickins | hugh dickins | 1 | 0.25% | 1 | 2.56% |
miklos szeredi | miklos szeredi | 1 | 0.25% | 1 | 2.56% |
greg kroah-hartman | greg kroah-hartman | 1 | 0.25% | 1 | 2.56% |
nick piggin | nick piggin | 1 | 0.25% | 1 | 2.56% |
| Total | 395 | 100.00% | 39 | 100.00% |
EXPORT_SYMBOL(inode_init_always);
/*
 * Allocate and initialise a new inode for @sb. The filesystem's own
 * ->alloc_inode() is used when provided, otherwise the generic inode
 * slab cache. Returns NULL on allocation or initialisation failure.
 */
static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	inode = sb->s_op->alloc_inode ? sb->s_op->alloc_inode(sb)
				      : kmem_cache_alloc(inode_cachep, GFP_KERNEL);
	if (!inode)
		return NULL;

	if (likely(!inode_init_always(sb, inode)))
		return inode;

	/* Initialisation failed: release the inode the same way it came. */
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		kmem_cache_free(inode_cachep, inode);
	return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
david chinner | david chinner | 57 | 54.81% | 1 | 33.33% |
christoph hellwig | christoph hellwig | 43 | 41.35% | 1 | 33.33% |
linus torvalds | linus torvalds | 4 | 3.85% | 1 | 33.33% |
| Total | 104 | 100.00% | 3 | 100.00% |
/*
 * Return an inode straight to the inode slab cache with no RCU grace
 * period. NOTE(review): presumably only safe when no RCU-walk lookup
 * can still see this inode — confirm against callers.
 */
void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
nick piggin | nick piggin | 17 | 100.00% | 1 | 100.00% |
| Total | 17 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(free_inode_nonrcu);
/*
 * Common teardown shared by destroy_inode() and filesystems that free
 * inodes themselves: detach writeback, security, fsnotify and file-lock
 * state, fix up the pending-unlink count, and drop any cached ACLs.
 */
void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	inode_detach_wb(inode);
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	locks_free_lock_context(inode);
	/* An unlinked inode was counted in s_remove_count; drop that now. */
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}
#ifdef CONFIG_FS_POSIX_ACL
	/* Release cached ACLs; the uncached sentinel must not be put. */
	if (inode->i_acl && !is_uncached_acl(inode->i_acl))
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
al viro | al viro | 39 | 31.45% | 1 | 6.67% |
miklos szeredi | miklos szeredi | 34 | 27.42% | 1 | 6.67% |
pre-git | pre-git | 12 | 9.68% | 2 | 13.33% |
andreas gruenbacher | andreas gruenbacher | 8 | 6.45% | 1 | 6.67% |
eric paris | eric paris | 5 | 4.03% | 1 | 6.67% |
jeff layton | jeff layton | 5 | 4.03% | 1 | 6.67% |
tejun heo | tejun heo | 5 | 4.03% | 1 | 6.67% |
stephen d. smalley | stephen d. smalley | 4 | 3.23% | 1 | 6.67% |
dave chinner | dave chinner | 4 | 3.23% | 1 | 6.67% |
eric sesterhenn | eric sesterhenn | 3 | 2.42% | 1 | 6.67% |
christoph hellwig | christoph hellwig | 2 | 1.61% | 1 | 6.67% |
linus torvalds | linus torvalds | 1 | 0.81% | 1 | 6.67% |
greg kroah-hartman | greg kroah-hartman | 1 | 0.81% | 1 | 6.67% |
nick piggin | nick piggin | 1 | 0.81% | 1 | 6.67% |
| Total | 124 | 100.00% | 15 | 100.00% |
EXPORT_SYMBOL(__destroy_inode);
/* RCU callback: the grace period has elapsed, free the inode to the slab. */
static void i_callback(struct rcu_head *head)
{
	kmem_cache_free(inode_cachep, container_of(head, struct inode, i_rcu));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
nick piggin | nick piggin | 33 | 100.00% | 1 | 100.00% |
| Total | 33 | 100.00% | 1 | 100.00% |
/*
 * Final disposal of an inode: run common teardown, then either hand the
 * inode to the filesystem's ->destroy_inode() or free it to the generic
 * slab cache after an RCU grace period.
 */
static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
linus torvalds | linus torvalds | 22 | 36.67% | 1 | 16.67% |
nick piggin | nick piggin | 18 | 30.00% | 2 | 33.33% |
christoph hellwig | christoph hellwig | 15 | 25.00% | 2 | 33.33% |
pre-git | pre-git | 5 | 8.33% | 1 | 16.67% |
| Total | 60 | 100.00% | 6 | 100.00% |
/**
* drop_nlink - directly drop an inode's link count
* @inode: inode
*
* This is a low-level filesystem helper to replace any
* direct filesystem manipulation of i_nlink. In cases
* where we are attempting to track writes to the
* filesystem, a decrement to zero means an imminent
* write when the file is truncated and actually unlinked
* on the filesystem.
*/
void drop_nlink(struct inode *inode)
{
	/* Decrementing below zero would be a caller bug; warn loudly. */
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	/* Hitting zero means a real unlink is pending; account it per-sb. */
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
miklos szeredi | miklos szeredi | 41 | 100.00% | 1 | 100.00% |
| Total | 41 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(drop_nlink);
/**
* clear_nlink - directly zero an inode's link count
* @inode: inode
*
* This is a low-level filesystem helper to replace any
* direct filesystem manipulation of i_nlink. See
* drop_nlink() for why we care about i_nlink hitting zero.
*/
void clear_nlink(struct inode *inode)
{
	/* Only account the zero transition once: skip already-zero inodes. */
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
miklos szeredi | miklos szeredi | 34 | 100.00% | 1 | 100.00% |
| Total | 34 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(clear_nlink);
/**
* set_nlink - directly set an inode's link count
* @inode: inode
* @nlink: new nlink (should be non-zero)
*
* This is a low-level filesystem helper to replace any
* direct filesystem manipulation of i_nlink.
*/
void set_nlink(struct inode *inode, unsigned int nlink)
{
	/* Setting to zero is exactly clear_nlink(). */
	if (!nlink) {
		clear_nlink(inode);
		return;
	}

	/* Yes, some filesystems do change nlink from zero to one */
	if (inode->i_nlink == 0)
		atomic_long_dec(&inode->i_sb->s_remove_count);

	inode->__i_nlink = nlink;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
miklos szeredi | miklos szeredi | 54 | 100.00% | 1 | 100.00% |
| Total | 54 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(set_nlink);
/**
* inc_nlink - directly increment an inode's link count
* @inode: inode
*
* This is a low-level filesystem helper to replace any
* direct filesystem manipulation of i_nlink. Currently,
* it is only here for parity with dec_nlink().
*/
void inc_nlink(struct inode *inode)
{
	/*
	 * Resurrecting a zero-nlink inode is only expected for I_LINKABLE
	 * (O_TMPFILE-style) inodes; undo the pending-removal accounting.
	 */
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}
	inode->__i_nlink++;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
miklos szeredi | miklos szeredi | 35 | 70.00% | 1 | 50.00% |
al viro | al viro | 15 | 30.00% | 1 | 50.00% |
| Total | 50 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(inc_nlink);
/*
 * One-time initialisation of an address_space: zero the whole structure,
 * then set up the radix tree, lists, rbtree root and locks.
 */
void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));

	/* Data structures. */
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	INIT_LIST_HEAD(&mapping->private_list);
	mapping->i_mmap = RB_ROOT;

	/* Locks. */
	spin_lock_init(&mapping->tree_lock);
	spin_lock_init(&mapping->private_lock);
	init_rwsem(&mapping->i_mmap_rwsem);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
miklos szeredi | miklos szeredi | 30 | 42.25% | 1 | 11.11% |
pre-git | pre-git | 29 | 40.85% | 2 | 22.22% |
nick piggin | nick piggin | 5 | 7.04% | 2 | 22.22% |
linus torvalds | linus torvalds | 3 | 4.23% | 2 | 22.22% |
davidlohr bueso | davidlohr bueso | 2 | 2.82% | 1 | 11.11% |
michel lespinasse | michel lespinasse | 2 | 2.82% | 1 | 11.11% |
| Total | 71 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL(address_space_init_once);
/*
* These are initializations that only need to be done
* once, because the fields are idempotent across use
* of the inode, so let the slab aware of that.
*/
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_io_list);
	INIT_LIST_HEAD(&inode->i_lru);
	/* The embedded page cache mapping gets its own once-only setup. */
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
andrew morton | andrew morton | 29 | 35.80% | 6 | 46.15% |
miklos szeredi | miklos szeredi | 27 | 33.33% | 1 | 7.69% |
eric paris | eric paris | 11 | 13.58% | 2 | 15.38% |
ingo molnar | ingo molnar | 6 | 7.41% | 1 | 7.69% |
pre-git | pre-git | 5 | 6.17% | 1 | 7.69% |
robert love | robert love | 2 | 2.47% | 1 | 7.69% |
dave chinner | dave chinner | 1 | 1.23% | 1 | 7.69% |
| Total | 81 | 100.00% | 13 | 100.00% |
EXPORT_SYMBOL(inode_init_once);
/* Slab constructor for inode_cachep: @foo is a raw struct inode object. */
static void init_once(void *foo)
{
	inode_init_once((struct inode *)foo);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
linus torvalds | linus torvalds | 23 | 85.19% | 1 | 33.33% |
christoph lameter | christoph lameter | 3 | 11.11% | 1 | 33.33% |
pre-git | pre-git | 1 | 3.70% | 1 | 33.33% |
| Total | 27 | 100.00% | 3 | 100.00% |
/*
* inode->i_lock must be held
*/
/* Take an extra reference; caller holds inode->i_lock (see rule above). */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
pre-git | pre-git | 12 | 66.67% | 4 | 66.67% |
andrew morton | andrew morton | 5 | 27.78% | 1 | 16.67% |
nick piggin | nick piggin | 1 | 5.56% | 1 | 16.67% |
| Total | 18 | 100.00% | 6 | 100.00% |
/*
* get additional reference to inode; caller must already hold one.
*/
void ihold(struct inode *inode)
{
	/* Result < 2 means the caller did not already hold a reference. */
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
al viro | al viro | 23 | 100.00% | 2 | 100.00% |
| Total | 23 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(ihold);
/* Put the inode on its superblock's LRU, counting it only if newly added. */
static void inode_lru_list_add(struct inode *inode)
{
	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
dave chinner | dave chinner | 10 | 30.30% | 3 | 33.33% |
al viro | al viro | 9 | 27.27% | 1 | 11.11% |
nick piggin | nick piggin | 8 | 24.24% | 2 | 22.22% |
pre-git | pre-git | 4 | 12.12% | 2 | 22.22% |
andrew morton | andrew morton | 2 | 6.06% | 1 | 11.11% |
| Total | 33 | 100.00% | 9 | 100.00% |
/*
* Add inode to LRU if needed (inode is unused and clean).
*
* Needs inode->i_lock held.
*/
void inode_add_lru(struct inode *inode)
{
	/*
	 * Only clean (not dirty, not under sync, not being freed), unused
	 * (i_count == 0) inodes on an active superblock go on the LRU.
	 */
	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
				I_FREEING | I_WILL_FREE)) &&
	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
		inode_lru_list_add(inode);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jan kara | jan kara | 50 | 98.04% | 1 | 50.00% |
theodore tso | theodore tso | 1 | 1.96% | 1 | 50.00% |
| Total | 51 | 100.00% | 2 | 100.00% |
/* Remove the inode from its superblock's LRU, if it was actually on it. */
static void inode_lru_list_del(struct inode *inode)
{
	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
dave chinner | dave chinner | 14 | 42.42% | 4 | 40.00% |
nick piggin | nick piggin | 14 | 42.42% | 3 | 30.00% |
pre-git | pre-git | 4 | 12.12% | 2 | 20.00% |
andrew morton | andrew morton | 1 | 3.03% | 1 | 10.00% |
| Total | 33 | 100.00% | 10 | 100.00% |
/**
* inode_sb_list_add - add inode to the superblock list of inodes
* @inode: inode to add
*/
void inode_sb_list_add(struct inode *inode)
{
spin_lock(&inode->i_sb->s_inode_list_lock);
list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
spin_unlock(&inode->i_sb->s_inode_list_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
christoph hellwig | christoph hellwig | 29 | 64.44% | 1 | 33.33% |
dave chinner | dave chinner | 16 | 35.56% | 2 | 66.67% |
| Total | 45 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(inode_sb_list_add);
static inline void inode_sb_list_del(struct inode *inode)
{
if (!list_empty(&inode->i_sb_list)) {
spin_lock(&inode->i_sb->s_inode_list_lock);
list_del_init(&inode->i_sb_list);
spin_unlock(&inode->i_sb->s_inode_list_lock);
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
dave chinner | dave chinner | 21 | 39.62% | 2 | 50.00% |
christoph hellwig | christoph hellwig | 19 | 35.85% | 1 | 25.00% |
eric dumazet | eric dumazet | 13 | 24.53% | 1 | 25.00% |
| Total | 53 | 100.00% | 4 | 100.00% |
/*
 * Hash an (sb, hashval) pair into an inode_hashtable bucket index.
 * The superblock pointer is mixed in so equal inode numbers on
 * different filesystems land in different buckets.
 */
static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	/* Fold the high bits back down so they influence the bucket choice. */
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
dave chinner | dave chinner | 57 | 96.61% | 1 | 50.00% |
christoph hellwig | christoph hellwig | 2 | 3.39% | 1 | 50.00% |
| Total | 59 | 100.00% | 2 | 100.00% |
/**
* __insert_inode_hash - hash an inode
* @inode: unhashed inode
* @hashval: unsigned long value used to locate this object in the
* inode_hashtable.
*
* Add an inode to the inode hash for this superblock.
*/
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	/* Lock order per the header comment: inode_hash_lock, then i_lock. */
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
dave chinner | dave chinner | 66 | 97.06% | 3 | 75.00% |
christoph hellwig | christoph hellwig | 2 | 2.94% | 1 | 25.00% |
| Total | 68 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(__insert_inode_hash);
/**
* __remove_inode_hash - remove an inode from the hash
* @inode: inode to unhash
*
* Remove an inode from the superblock.
*/
void __remove_inode_hash(struct inode *inode)
{
	/* Same lock order as __insert_inode_hash(): hash lock, then i_lock. */
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	/* hlist_del_init leaves i_hash self-terminated for unhashed checks. */
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
dave chinner | dave chinner | 45 | 97.83% | 3 | 75.00% |
eric dumazet | eric dumazet | 1 | 2.17% | 1 | 25.00% |
| Total | 46 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(__remove_inode_hash);
/*
 * Mark an inode as fully torn down (I_FREEING | I_CLEAR) after verifying
 * its page cache and buffer state are empty. Called by filesystems (or
 * evict()) once eviction of data is complete.
 */
void clear_inode(struct inode *inode)
{
	might_sleep();
	/*
	 * We have to cycle tree_lock here because reclaim can be still in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free mapping under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(inode->i_data.nrexceptional);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
al viro | al viro | 64 | 66.67% | 1 | 16.67% |
jan kara | jan kara | 22 | 22.92% | 2 | 33.33% |
johannes weiner | johannes weiner | 8 | 8.33% | 1 | 16.67% |
ross zwisler | ross zwisler | 1 | 1.04% | 1 | 16.67% |
nick piggin | nick piggin | 1 | 1.04% | 1 | 16.67% |
| Total | 96 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(clear_inode);
/*
* Free the inode passed in, removing it from the lists it is still connected
* to. We remove any pages still attached to the inode and wait for any IO that
* is still in progress before finally destroying the inode.
*
* An inode must already be marked I_FREEING so that we avoid the inode being
* moved back onto lists if we race with other code that manipulates the lists
* (e.g. writeback_single_inode). The caller is responsible for setting this.
*
* An inode must already be removed from the LRU list before being evicted from
* the cache. This should occur atomically with setting the I_FREEING state
* flag, so no inodes here should ever be on the LRU when being evicted.
*/
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	/* Detach from writeback and superblock lists first. */
	if (!list_empty(&inode->i_io_list))
		inode_io_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode. We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	/* Let the filesystem evict, or fall back to generic truncation. */
	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	/* Drop block/char device bindings for special inodes. */
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	/*
	 * Unhash only after eviction, then wake anyone sleeping on __I_NEW
	 * before the inode memory is finally destroyed.
	 */
	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
al viro | al viro | 88 | 46.32% | 4 | 33.33% |
dave chinner | dave chinner | 84 | 44.21% | 3 | 25.00% |
eric dumazet | eric dumazet | 10 | 5.26% | 1 | 8.33% |
jan kara | jan kara | 7 | 3.68% | 3 | 25.00% |
johannes weiner | johannes weiner | 1 | 0.53% | 1 | 8.33% |
| Total | 190 | 100.00% | 12 | 100.00% |
/*
* dispose_list - dispose of the contents of a local list
* @head: the head of the list to free
*
* Dispose-list gets a local list with local inodes in it, so it doesn't
* need to worry about list corruption and SMP locks.
*/
/*
 * Evict every inode on a caller-private list. The list is local, so no
 * list locking is needed; only the eviction of each inode takes locks.
 */
static void dispose_list(struct list_head *head)
{
	struct inode *inode;

	while (!list_empty(head)) {
		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
		/* Lists can be long; stay preemption-friendly. */
		cond_resched();
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
pre-git | pre-git | 30 | 55.56% | 4 | 40.00% |
andrew morton | andrew morton | 16 | 29.63% | 1 | 10.00% |
josef bacik | josef bacik | 3 | 5.56% | 1 | 10.00% |
nick piggin | nick piggin | 3 | 5.56% | 2 | 20.00% |
al viro | al viro | 1 | 1.85% | 1 | 10.00% |
pavel emelianov | pavel emelianov | 1 | 1.85% | 1 | 10.00% |
| Total | 54 | 100.00% | 10 | 100.00% |
/**
* evict_inodes - evict all evictable inodes for a superblock
* @sb: superblock to operate on
*
* Make sure that no inodes with zero refcount are retained. This is
* called by superblock shutdown after having MS_ACTIVE flag removed,
* so any inode reaching zero refcount during or after that call will
* be immediately evicted.
*/
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

again:
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		/* Skip inodes that still have users... */
		if (atomic_read(&inode->i_count))
			continue;

		/* ...and those already new or being freed by someone else. */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		/* Collect on a private list; actual eviction happens unlocked. */
		list_add(&inode->i_lru, &dispose);

		/*
		 * We can have a ton of inodes to evict at unmount time given
		 * enough memory, check to see if we need to go to sleep for a
		 * bit so we don't livelock.
		 */
		if (need_resched()) {
			spin_unlock(&sb->s_inode_list_lock);
			cond_resched();
			dispose_list(&dispose);
			/* Restart: the list may have changed while unlocked. */
			goto again;
		}
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
al viro | al viro | 48 | 29.81% | 1 | 7.14% |
dave chinner | dave chinner | 44 | 27.33% | 3 | 21.43% |
josef bacik | josef bacik | 30 | 18.63% | 1 | 7.14% |
pre-git | pre-git | 28 | 17.39% | 7 | 50.00% |
nick piggin | nick piggin | 11 | 6.83% | 2 | 14.29% |
| Total | 161 | 100.00% | 14 | 100.00% |
/**
* invalidate_inodes - attempt to free all inodes on a superblock
* @sb: superblock to operate on
* @kill_dirty: flag to guide handling of dirty inodes
*
* Attempts to free all inodes for a given superblock. If there were any
* busy inodes return a non-zero value, else zero.
* If @kill_dirty is set, discard dirty inodes too, otherwise treat
* them as busy.
*/
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/* Already new or being freed: nothing for us to do. */
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		/* Dirty inodes count as busy unless the caller says to kill them. */
		if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		/* Referenced inodes are always busy. */
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		/* Collect on a private list; eviction happens after unlocking. */
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);

	return busy;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
dave chinner | dave chinner | 62 | 34.07% | 3 | 13.04% |
pre-git | pre-git | 53 | 29.12% | 9 | 39.13% |
neil brown | neil brown | 20 | 10.99% | 1 | 4.35% |
christoph hellwig | christoph hellwig | 20 | 10.99% | 3 | 13.04% |
nick piggin | nick piggin | 15 | 8.24% | 3 | 13.04% |
al viro | al viro | 6 | 3.30% | 1 | 4.35% |
eric paris | eric paris | 4 | 2.20% | 1 | 4.35% |
theodore tso | theodore tso | 1 | 0.55% | 1 | 4.35% |
andrew morton | andrew morton | 1 | 0.55% | 1 | 4.35% |
| Total | 182 | 100.00% | 23 | 100.00% |
/*
* Isolate the inode from the LRU in preparation for freeing it.
*
* Any inodes which are pinned purely because of attached pagecache have their
* pagecache removed. If the inode has metadata buffers attached to
* mapping->private_list then try to remove them.
*
* If the inode has the I_REFERENCED flag set, then it means that it has been
* used recently - the flag is set in iput_final(). When we encounter such an
* inode, clear the flag and move it to the back of the LRU so it gets another
* pass through the LRU before it gets reclaimed. This is necessary because of
* the fact we are doing lazy LRU updates to minimise lock contention so the
* LRU does not have strict ordering. Hence we don't want to reclaim inodes
* with this flag set because they are the inodes that are out of order.
*/
static enum lru_status inode_lru_isolate(struct list_head *item,
struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
struct list_head *freeable = arg;
struct inode *inode = container_of(item, struct inode, i_lru);
/*
* we are inverting the lru lock/inode->i_lock here, so use a trylock.
* If we fail to get the lock, just skip it.
*/
if (!spin_trylock(&inode->i_lock))
return LRU_SKIP;
/*
* Referenced or dirty inodes are still in use. Give them another pass
* through the LRU as we canot reclaim them now.
*/
if (atomic_read(&inode->i_count) ||
(inode->i_state & ~I_REFERENCED)) {
list_lru_isolate(lru, &inode->i_lru);
spin_unlock(&inode->i_lock);
this_cpu_dec(nr_unused);
return LRU_REMOVED;
}
/* recently referenced inodes get one more pass */
if (inode->i_state & I_REFERENCED) {
inode->i_state &= ~I_REFERENCED;
spin_unlock(&inode->i_lock);
return LRU_ROTATE;
}
if (inode_has_buffers(inode) || inode->i_data.nrpages) {
__iget(inode);
spin_unlock(&inode->i_lock);
spin_unlock(lru_lock);
if (remove_inode_buffers(inode)) {
unsigned long reap;
reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
if (current_is_kswapd())
__count_vm_events(KSWAPD_INODESTEAL, reap);
else
__count_vm_events(PGINODESTEAL, reap);
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += reap;
}
iput(inode);
spin_lock(lru_lock);
return LRU_RETRY;
}
WARN_ON(inode->i_state & I_NEW);
inode->i_state |= I_FREEING;
list_lru_isolate_move(lru, &inode->i_lru, freeable);
spin_unlock(&inode->i_lock);
this_cpu_dec