cregit-Linux: how code gets into the kernel

Release 4.18 fs/inode.c

Directory: fs
/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <linux/iversion.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */


static unsigned int i_hash_mask __read_mostly;

static unsigned int i_hash_shift __read_mostly;

static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */

const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */

struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);


static struct kmem_cache *inode_cachep __read_mostly;


static long get_nr_inodes(void) { int i; long sum = 0; for_each_possible_cpu(i) sum += per_cpu(nr_inodes, i); return sum < 0 ? 0 : sum; }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Nicholas Piggin2565.79%133.33%
Dave Chinner1128.95%133.33%
Glauber de Oliveira Costa25.26%133.33%
Total38100.00%3100.00%


static inline long get_nr_inodes_unused(void) { int i; long sum = 0; for_each_possible_cpu(i) sum += per_cpu(nr_unused, i); return sum < 0 ? 0 : sum; }

Contributors

PersonTokensPropCommitsCommitProp
Dave Chinner3692.31%250.00%
Glauber de Oliveira Costa25.13%125.00%
Nicholas Piggin12.56%125.00%
Total39100.00%4100.00%


long get_nr_dirty_inodes(void) { /* not actually dirty inodes, but a wild approximation */ long nr_dirty = get_nr_inodes() - get_nr_inodes_unused(); return nr_dirty > 0 ? nr_dirty : 0; }

Contributors

PersonTokensPropCommitsCommitProp
Dave Chinner2388.46%133.33%
Glauber de Oliveira Costa27.69%133.33%
Nicholas Piggin13.85%133.33%
Total26100.00%3100.00%

/* * Handle nr_inode sysctl */ #ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { inodes_stat.nr_inodes = get_nr_inodes(); inodes_stat.nr_unused = get_nr_inodes_unused(); return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); }

Contributors

PersonTokensPropCommitsCommitProp
Dave Chinner5296.30%250.00%
Glauber de Oliveira Costa11.85%125.00%
Joe Perches11.85%125.00%
Total54100.00%4100.00%

#endif
static int no_open(struct inode *inode, struct file *file) { return -ENXIO; }

Contributors

PersonTokensPropCommitsCommitProp
Al Viro20100.00%1100.00%
Total20100.00%1100.00%

/** * inode_init_always - perform inode structure initialisation * @sb: superblock inode belongs to * @inode: inode to initialise * * These are initializations that need to be done on every inode * allocation as the fields are not initialised by slab allocation. */
int inode_init_always(struct super_block *sb, struct inode *inode) { static const struct inode_operations empty_iops; static const struct file_operations no_open_fops = {.open = no_open}; struct address_space *const mapping = &inode->i_data; inode->i_sb = sb; inode->i_blkbits = sb->s_blocksize_bits; inode->i_flags = 0; atomic_set(&inode->i_count, 1); inode->i_op = &empty_iops; inode->i_fop = &no_open_fops; inode->__i_nlink = 1; inode->i_opflags = 0; if (sb->s_xattr) inode->i_opflags |= IOP_XATTR; i_uid_write(inode, 0); i_gid_write(inode, 0); atomic_set(&inode->i_writecount, 0); inode->i_size = 0; inode->i_write_hint = WRITE_LIFE_NOT_SET; inode->i_blocks = 0; inode->i_bytes = 0; inode->i_generation = 0; inode->i_pipe = NULL; inode->i_bdev = NULL; inode->i_cdev = NULL; inode->i_link = NULL; inode->i_dir_seq = 0; inode->i_rdev = 0; inode->dirtied_when = 0; #ifdef CONFIG_CGROUP_WRITEBACK inode->i_wb_frn_winner = 0; inode->i_wb_frn_avg_time = 0; inode->i_wb_frn_history = 0; #endif if (security_inode_alloc(inode)) goto out; spin_lock_init(&inode->i_lock); lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); init_rwsem(&inode->i_rwsem); lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key); atomic_set(&inode->i_dio_count, 0); mapping->a_ops = &empty_aops; mapping->host = inode; mapping->flags = 0; mapping->wb_err = 0; atomic_set(&mapping->i_mmap_writable, 0); mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); mapping->private_data = NULL; mapping->writeback_index = 0; inode->i_private = NULL; inode->i_mapping = mapping; INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */ #ifdef CONFIG_FS_POSIX_ACL inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED; #endif #ifdef CONFIG_FSNOTIFY inode->i_fsnotify_mask = 0; #endif inode->i_flctx = NULL; this_cpu_inc(nr_inodes); return 0; out: return -ENOMEM; }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds13331.74%24.76%
Al Viro6214.80%1023.81%
Peter Zijlstra5011.93%12.38%
Andrew Morton419.79%511.90%
Tahsin Erdogan235.49%12.38%
Andreas Gruenbacher122.86%12.38%
Eric Paris112.63%12.38%
Christoph Hellwig102.39%37.14%
David Herrmann102.39%12.38%
Eric W. Biedermann81.91%12.38%
David Chinner81.91%12.38%
Jan Kara61.43%12.38%
Chris Wright61.43%12.38%
Jens Axboe61.43%12.38%
Darrick J. Wong61.43%12.38%
Jeff Layton61.43%12.38%
Mimi Zohar51.19%12.38%
Chris Mason40.95%12.38%
Dave Chinner40.95%12.38%
Theodore Y. Ts'o20.48%12.38%
Miklos Szeredi10.24%12.38%
Rafael Aquini10.24%12.38%
Hugh Dickins10.24%12.38%
Arjan van de Ven10.24%12.38%
Nicholas Piggin10.24%12.38%
Greg Kroah-Hartman10.24%12.38%
Total419100.00%42100.00%

EXPORT_SYMBOL(inode_init_always);
static struct inode *alloc_inode(struct super_block *sb) { struct inode *inode; if (sb->s_op->alloc_inode) inode = sb->s_op->alloc_inode(sb); else inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL); if (!inode) return NULL; if (unlikely(inode_init_always(sb, inode))) { if (inode->i_sb->s_op->destroy_inode) inode->i_sb->s_op->destroy_inode(inode); else kmem_cache_free(inode_cachep, inode); return NULL; } return inode; }

Contributors

PersonTokensPropCommitsCommitProp
David Chinner5754.81%133.33%
Christoph Hellwig4341.35%133.33%
Linus Torvalds43.85%133.33%
Total104100.00%3100.00%


void free_inode_nonrcu(struct inode *inode) { kmem_cache_free(inode_cachep, inode); }

Contributors

PersonTokensPropCommitsCommitProp
Nicholas Piggin17100.00%1100.00%
Total17100.00%1100.00%

EXPORT_SYMBOL(free_inode_nonrcu);
void __destroy_inode(struct inode *inode) { BUG_ON(inode_has_buffers(inode)); inode_detach_wb(inode); security_inode_free(inode); fsnotify_inode_delete(inode); locks_free_lock_context(inode); if (!inode->i_nlink) { WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0); atomic_long_dec(&inode->i_sb->s_remove_count); } #ifdef CONFIG_FS_POSIX_ACL if (inode->i_acl && !is_uncached_acl(inode->i_acl)) posix_acl_release(inode->i_acl); if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl)) posix_acl_release(inode->i_default_acl); #endif this_cpu_dec(nr_inodes); }

Contributors

PersonTokensPropCommitsCommitProp
Al Viro3931.45%16.67%
Miklos Szeredi3427.42%16.67%
Linus Torvalds (pre-git)129.68%213.33%
Andreas Gruenbacher86.45%16.67%
Jeff Layton54.03%16.67%
Eric Paris54.03%16.67%
Tejun Heo54.03%16.67%
Stephen D. Smalley43.23%16.67%
Dave Chinner43.23%16.67%
Eric Sesterhenn / Snakebyte32.42%16.67%
Christoph Hellwig21.61%16.67%
Nicholas Piggin10.81%16.67%
Linus Torvalds10.81%16.67%
Greg Kroah-Hartman10.81%16.67%
Total124100.00%15100.00%

EXPORT_SYMBOL(__destroy_inode);
static void i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(inode_cachep, inode); }

Contributors

PersonTokensPropCommitsCommitProp
Nicholas Piggin33100.00%1100.00%
Total33100.00%1100.00%


static void destroy_inode(struct inode *inode) { BUG_ON(!list_empty(&inode->i_lru)); __destroy_inode(inode); if (inode->i_sb->s_op->destroy_inode) inode->i_sb->s_op->destroy_inode(inode); else call_rcu(&inode->i_rcu, i_callback); }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds2236.67%116.67%
Nicholas Piggin1830.00%233.33%
Christoph Hellwig1525.00%233.33%
Linus Torvalds (pre-git)58.33%116.67%
Total60100.00%6100.00%

/** * drop_nlink - directly drop an inode's link count * @inode: inode * * This is a low-level filesystem helper to replace any * direct filesystem manipulation of i_nlink. In cases * where we are attempting to track writes to the * filesystem, a decrement to zero means an imminent * write when the file is truncated and actually unlinked * on the filesystem. */
void drop_nlink(struct inode *inode) { WARN_ON(inode->i_nlink == 0); inode->__i_nlink--; if (!inode->i_nlink) atomic_long_inc(&inode->i_sb->s_remove_count); }

Contributors

PersonTokensPropCommitsCommitProp
Miklos Szeredi41100.00%1100.00%
Total41100.00%1100.00%

EXPORT_SYMBOL(drop_nlink); /** * clear_nlink - directly zero an inode's link count * @inode: inode * * This is a low-level filesystem helper to replace any * direct filesystem manipulation of i_nlink. See * drop_nlink() for why we care about i_nlink hitting zero. */
void clear_nlink(struct inode *inode) { if (inode->i_nlink) { inode->__i_nlink = 0; atomic_long_inc(&inode->i_sb->s_remove_count); } }

Contributors

PersonTokensPropCommitsCommitProp
Miklos Szeredi34100.00%1100.00%
Total34100.00%1100.00%

EXPORT_SYMBOL(clear_nlink); /** * set_nlink - directly set an inode's link count * @inode: inode * @nlink: new nlink (should be non-zero) * * This is a low-level filesystem helper to replace any * direct filesystem manipulation of i_nlink. */
void set_nlink(struct inode *inode, unsigned int nlink) { if (!nlink) { clear_nlink(inode); } else { /* Yes, some filesystems do change nlink from zero to one */ if (inode->i_nlink == 0) atomic_long_dec(&inode->i_sb->s_remove_count); inode->__i_nlink = nlink; } }

Contributors

PersonTokensPropCommitsCommitProp
Miklos Szeredi54100.00%1100.00%
Total54100.00%1100.00%

EXPORT_SYMBOL(set_nlink); /** * inc_nlink - directly increment an inode's link count * @inode: inode * * This is a low-level filesystem helper to replace any * direct filesystem manipulation of i_nlink. Currently, * it is only here for parity with dec_nlink(). */
void inc_nlink(struct inode *inode) { if (unlikely(inode->i_nlink == 0)) { WARN_ON(!(inode->i_state & I_LINKABLE)); atomic_long_dec(&inode->i_sb->s_remove_count); } inode->__i_nlink++; }

Contributors

PersonTokensPropCommitsCommitProp
Miklos Szeredi3570.00%150.00%
Al Viro1530.00%150.00%
Total50100.00%2100.00%

EXPORT_SYMBOL(inc_nlink);
static void __address_space_init_once(struct address_space *mapping) { INIT_RADIX_TREE(&mapping->i_pages, GFP_ATOMIC | __GFP_ACCOUNT); init_rwsem(&mapping->i_mmap_rwsem); INIT_LIST_HEAD(&mapping->private_list); spin_lock_init(&mapping->private_lock); mapping->i_mmap = RB_ROOT_CACHED; }

Contributors

PersonTokensPropCommitsCommitProp
Miklos Szeredi2343.40%17.69%
Linus Torvalds (pre-git)1324.53%215.38%
Nicholas Piggin59.43%215.38%
Davidlohr Bueso A35.66%215.38%
Linus Torvalds35.66%215.38%
Vladimir Davydov23.77%17.69%
Dave Chinner23.77%17.69%
Matthew Wilcox11.89%17.69%
Michel Lespinasse11.89%17.69%
Total53100.00%13100.00%


void address_space_init_once(struct address_space *mapping) { memset(mapping, 0, sizeof(*mapping)); __address_space_init_once(mapping); }

Contributors

PersonTokensPropCommitsCommitProp
Dave Chinner28100.00%1100.00%
Total28100.00%1100.00%

EXPORT_SYMBOL(address_space_init_once); /* * These are initializations that only need to be done * once, because the fields are idempotent across use * of the inode, so let the slab aware of that. */
void inode_init_once(struct inode *inode) { memset(inode, 0, sizeof(*inode)); INIT_HLIST_NODE(&inode->i_hash); INIT_LIST_HEAD(&inode->i_devices); INIT_LIST_HEAD(&inode->i_io_list); INIT_LIST_HEAD(&inode->i_wb_list); INIT_LIST_HEAD(&inode->i_lru); __address_space_init_once(&inode->i_data); i_size_ordered_init(inode); }

Contributors

PersonTokensPropCommitsCommitProp
Andrew Morton2938.16%545.45%
Miklos Szeredi2634.21%19.09%
Dave Chinner1013.16%327.27%
Ingo Molnar67.89%19.09%
Linus Torvalds (pre-git)56.58%19.09%
Total76100.00%11100.00%

EXPORT_SYMBOL(inode_init_once);
static void init_once(void *foo) { struct inode *inode = (struct inode *) foo; inode_init_once(inode); }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds2385.19%133.33%
Christoph Lameter311.11%133.33%
Linus Torvalds (pre-git)13.70%133.33%
Total27100.00%3100.00%

/* * inode->i_lock must be held */
void __iget(struct inode *inode) { atomic_inc(&inode->i_count); }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)1266.67%466.67%
Andrew Morton527.78%116.67%
Nicholas Piggin15.56%116.67%
Total18100.00%6100.00%

/* * get additional reference to inode; caller must already hold one. */
void ihold(struct inode *inode) { WARN_ON(atomic_inc_return(&inode->i_count) < 2); }

Contributors

PersonTokensPropCommitsCommitProp
Al Viro23100.00%2100.00%
Total23100.00%2100.00%

EXPORT_SYMBOL(ihold);
static void inode_lru_list_add(struct inode *inode) { if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru)) this_cpu_inc(nr_unused); else inode->i_state |= I_REFERENCED; }

Contributors

PersonTokensPropCommitsCommitProp
Nicholas Piggin1435.00%225.00%
Dave Chinner1025.00%337.50%
Josef Whiter717.50%112.50%
Al Viro717.50%112.50%
Linus Torvalds (pre-git)25.00%112.50%
Total40100.00%8100.00%

/* * Add inode to LRU if needed (inode is unused and clean). * * Needs inode->i_lock held. */
void inode_add_lru(struct inode *inode) { if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE)) && !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE) inode_lru_list_add(inode); }

Contributors

PersonTokensPropCommitsCommitProp
Jan Kara4996.08%133.33%
Linus Torvalds11.96%133.33%
Theodore Y. Ts'o11.96%133.33%
Total51100.00%3100.00%


static void inode_lru_list_del(struct inode *inode) { if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru)) this_cpu_dec(nr_unused); }

Contributors

PersonTokensPropCommitsCommitProp
Dave Chinner1442.42%450.00%
Nicholas Piggin1442.42%337.50%
Linus Torvalds (pre-git)515.15%112.50%
Total33100.00%8100.00%

/** * inode_sb_list_add - add inode to the superblock list of inodes * @inode: inode to add */
void inode_sb_list_add(struct inode *inode) { spin_lock(&inode->i_sb->s_inode_list_lock); list_add(&inode->i_sb_list, &inode->i_sb->s_inodes); spin_unlock(&inode->i_sb->s_inode_list_lock); }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig2453.33%133.33%
Dave Chinner2146.67%266.67%
Total45100.00%3100.00%

EXPORT_SYMBOL_GPL(inode_sb_list_add);
static inline void inode_sb_list_del(struct inode *inode) { if (!list_empty(&inode->i_sb_list)) { spin_lock(&inode->i_sb->s_inode_list_lock); list_del_init(&inode->i_sb_list); spin_unlock(&inode->i_sb->s_inode_list_lock); } }

Contributors

PersonTokensPropCommitsCommitProp
Dave Chinner2139.62%250.00%
Christoph Hellwig1935.85%125.00%
Eric Dumazet1324.53%125.00%
Total53100.00%4100.00%


static unsigned long hash(struct super_block *sb, unsigned long hashval) { unsigned long tmp; tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) / L1_CACHE_BYTES; tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift); return tmp & i_hash_mask; }

Contributors

PersonTokensPropCommitsCommitProp
Dave Chinner5796.61%150.00%
Christoph Hellwig23.39%150.00%
Total59100.00%2100.00%

/** * __insert_inode_hash - hash an inode * @inode: unhashed inode * @hashval: unsigned long value used to locate this object in the * inode_hashtable. * * Add an inode to the inode hash for this superblock. */
void __insert_inode_hash(struct inode *inode, unsigned long hashval) { struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval); spin_lock(&inode_hash_lock); spin_lock(&inode->i_lock); hlist_add_head(&inode->i_hash, b); spin_unlock(&inode->i_lock); spin_unlock(&inode_hash_lock); }

Contributors

PersonTokensPropCommitsCommitProp
Dave Chinner6697.06%375.00%
Christoph Hellwig22.94%125.00%
Total68100.00%4100.00%

EXPORT_SYMBOL(__insert_inode_hash); /** * __remove_inode_hash - remove an inode from the hash * @inode: inode to unhash * * Remove an inode from the superblock. */
void __remove_inode_hash(struct inode *inode) { spin_lock(&inode_hash_lock); spin_lock(&inode->i_lock); hlist_del_init(&inode->i_hash); spin_unlock(&inode->i_lock); spin_unlock(&inode_hash_lock); }

Contributors

PersonTokensPropCommitsCommitProp
Dave Chinner4597.83%375.00%
Eric Dumazet12.17%125.00%
Total46100.00%4100.00%

EXPORT_SYMBOL(__remove_inode_hash);
void clear_inode(struct inode *inode) { /* * We have to cycle the i_pages lock here because reclaim can be in the * process of removing the last page (in __delete_from_page_cache()) * and we must not free the mapping under it. */ xa_lock_irq(&inode->i_data.i_pages); BUG_ON(inode->i_data.nrpages); BUG_ON(inode->i_data.nrexceptional); xa_unlock_irq(&inode->i_data.i_pages); BUG_ON(!list_empty(&inode->i_data.private_list)); BUG_ON(!(inode->i_state & I_FREEING)); BUG_ON(inode->i_state & I_CLEAR); BUG_ON(!list_empty(&inode->i_wb_list)); /* don't need i_lock here, no concurrent mods to i_state */ inode->i_state = I_FREEING | I_CLEAR; }

Contributors

PersonTokensPropCommitsCommitProp
Al Viro6158.10%112.50%
Jan Kara1716.19%225.00%
Dave Chinner1211.43%112.50%
Johannes Weiner87.62%112.50%
Matthew Wilcox54.76%112.50%
Ross Zwisler10.95%112.50%
Nicholas Piggin10.95%112.50%
Total105100.00%8100.00%

EXPORT_SYMBOL(clear_inode); /* * Free the inode passed in, removing it from the lists it is still connected * to. We remove any pages still attached to the inode and wait for any IO that * is still in progress before finally destroying the inode. * * An inode must already be marked I_FREEING so that we avoid the inode being * moved back onto lists if we race with other code that manipulates the lists * (e.g. writeback_single_inode). The caller is responsible for setting this. * * An inode must already be removed from the LRU list before being evicted from * the cache. This should occur atomically with setting the I_FREEING state * flag, so no inodes here should ever be on the LRU when being evicted. */
static void evict(struct inode *inode) { const struct super_operations *op = inode->i_sb->s_op; BUG_ON(!(inode->i_state & I_FREEING)); BUG_ON(!list_empty(&inode->i_lru)); if (!list_empty(&inode->i_io_list)) inode_io_list_del(inode); inode_sb_list_del(inode); /* * Wait for flusher thread to be done with the inode so that filesystem * does not start destroying it while writeback is still running. Since * the inode has I_FREEING set, flusher thread won't start new work on * the inode. We just have to wait for running writeback to finish. */ inode_wait_for_writeback(inode); if (op->evict_inode) { op->evict_inode(inode); } else { truncate_inode_pages_final(&inode->i_data); clear_inode(inode); } if (S_ISBLK(inode->i_mode) && inode->i_bdev) bd_forget(inode); if (S_ISCHR(inode->i_mode) && inode->i_cdev) cd_forget(inode); remove_inode_hash(inode); spin_lock(&inode->i_lock); wake_up_bit(&inode->i_state, __I_NEW); BUG_ON(inode->i_state != (I_FREEING | I_CLEAR)); spin_unlock(&inode->i_lock); destroy_inode(inode); }

Contributors

PersonTokensPropCommitsCommitProp
Al Viro8846.32%433.33%
Dave Chinner8444.21%325.00%
Eric Dumazet105.26%18.33%
Jan Kara73.68%325.00%
Johannes Weiner10.53%18.33%
Total190100.00%12100.00%

/* * dispose_list - dispose of the contents of a local list * @head: the head of the list to free * * Dispose-list gets a local list with local inodes in it, so it doesn't * need to worry about list corruption and SMP locks. */
static void dispose_list(struct list_head *head) { while (!list_empty(head)) { struct inode *inode; inode = list_first_entry(head, struct inode, i_lru); list_del_init(&inode->i_lru); evict(inode); cond_resched(); } }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3055.56%440.00%
Andrew Morton1629.63%110.00%
Josef Bacik35.56%110.00%
Nicholas Piggin35.56%220.00%
Al Viro11.85%110.00%
Pavel Emelyanov11.85%110.00%
Total54100.00%10100.00%

/** * evict_inodes - evict all evictable inodes for a superblock * @sb: superblock to operate on * * Make sure that no inodes with zero refcount are retained. This is * called by superblock shutdown after having SB_ACTIVE flag removed, * so any inode reaching zero refcount during or after that call will * be immediately evicted. */
void evict_inodes(struct super_block *sb) { struct inode *inode, *next; LIST_HEAD(dispose); again: spin_lock(&sb->s_inode_list_lock); list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { if (atomic_read(&inode->i_count)) continue; spin_lock(&inode->i_lock); if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { spin_unlock(&inode->i_lock); continue; } inode->i_state |= I_FREEING; inode_lru_list_del(inode); spin_unlock(&inode->i_lock); list_add(&inode->i_lru, &dispose); /* * We can have a ton of inodes to evict at unmount time given * enough memory, check to see if we need to go to sleep for a * bit so we don't livelock. */ if (need_resched()) { spin_unlock(&sb->s_inode_list_lock); cond_resched(); dispose_list(&dispose); goto again; } } spin_unlock(&sb->s_inode_list_lock); dispose_list(&dispose); }

Contributors

PersonTokensPropCommitsCommitProp
Al Viro4829.81%17.14%
Dave Chinner4427.33%321.43%
Josef Bacik3018.63%17.14%
Linus Torvalds (pre-git)2817.39%750.00%
Nicholas Piggin116.83%214.29%
Total161100.00%14100.00%

EXPORT_SYMBOL_GPL(evict_inodes); /** * invalidate_inodes - attempt to free all inodes on a superblock * @sb: superblock to operate on * @kill_dirty: flag to guide handling of dirty inodes * * Attempts to free all inodes for a given superblock. If there were any * busy inodes return a non-zero value, else zero. * If @kill_dirty is set, discard dirty inodes too, otherwise treat * them as busy. */
int invalidate_inodes(struct super_block *sb, bool kill_dirty) { int busy = 0; struct inode *inode, *next; LIST_HEAD(dispose); spin_lock(&sb->s_inode_list_lock); list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { spin_lock(&inode->i_lock); if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { spin_unlock(&inode->i_lock); continue; } if (inode->i_state & I_DIRTY_ALL && !kill_dirty) { spin_unlock(&inode->i_lock); busy = 1; continue; } if (atomic_read(&inode->i_count)) { spin_unlock(&inode->i_lock); busy = 1; continue; } inode->i_state |= I_FREEING; inode_lru_list_del(inode); spin_unlock(&inode->i_lock); list_add(&inode->i_lru, &dispose); } spin_unlock(&sb->s_inode_list_lock); dispose_list(&dispose); return busy; }

Contributors

PersonTokensPropCommitsCommitProp
Dave Chinner6234.07%312.50%
Linus Torvalds (pre-git)5329.12%1041.67%
Neil Brown2010.99%14.17%
Christoph Hellwig2010.99%312.50%
Nicholas Piggin158.24%312.50%
Al Viro63.30%14.17%
Eric Paris42.20%14.17%
Andrew Morton10.55%14.17%
Theodore Y. Ts'o10.55%14.17%
Total182100.00%24100.00%

/* * Isolate the inode from the LRU in preparation for freeing it. * * Any inodes which are pinned purely because of attached pagecache have their * pagecache removed. If the inode has metadata buffers attached to * mapping->private_list then try to remove them. * * If the inode has the I_REFERENCED flag set, then it means that it has been * used recently - the flag is set in iput_final(). When we encounter such an * inode, clear the flag and move it to the back of the LRU so it gets another * pass through the LRU before it gets reclaimed. This is necessary because of * the fact we are doing lazy LRU updates to minimise lock contention so the * LRU does not have strict ordering. Hence we don't want to reclaim inodes * with this flag set because they are the inodes that are out of order. */
static enum lru_status inode_lru_isolate(struct list_head *item, struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) { struct list_head *freeable = arg; struct inode *inode = container_of(item, struct inode, i_lru); /* * we are inverting the lru lock/inode->i_lock here, so use a trylock. * If we fail to get the lock, just skip it. */ if (!spin_trylock(&inode->i_lock)) return LRU_SKIP