cregit-Linux: how code gets into the kernel

Release 4.7, fs/dcache.c
/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <linux/kasan.h>

#include "internal.h"
#include "mount.h"

/*
 * Usage:
 * dentry->d_inode->i_lock protects:
 *   - i_dentry, d_u.d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dentry->d_sb->s_dentry_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_u.d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dentry->d_sb->s_dentry_lru_lock
 *     dcache_hash_bucket lock
 *     s_anon lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
 *   dentry1->d_lock
 *     dentry2->d_lock
 */
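
For dentries with no ancestor relationship, the address comparison above is the tie-breaker. A minimal sketch of what a caller would do (a hypothetical helper, not part of dcache.c; spin_lock_nested() and DENTRY_D_LOCK_NESTED are the real annotations used for nested d_lock acquisition):

/*
 * Hypothetical: lock two dentries that are not ancestors of each
 * other, following the address-order rule described above.
 */
static void lock_two_unrelated(struct dentry *d1, struct dentry *d2)
{
	if (d1 < d2) {
		spin_lock(&d1->d_lock);
		spin_lock_nested(&d2->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		spin_lock(&d2->d_lock);
		spin_lock_nested(&d1->d_lock, DENTRY_D_LOCK_NESTED);
	}
}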

int sysctl_vfs_cache_pressure __read_mostly = 100;

EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);


EXPORT_SYMBOL(rename_lock);


static struct kmem_cache *dentry_cache __read_mostly;

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */


static unsigned int d_hash_mask __read_mostly;

static unsigned int d_hash_shift __read_mostly;


static struct hlist_bl_head *dentry_hashtable __read_mostly;


static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return dentry_hashtable + hash_32(hash, d_hash_shift);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
nick piggin        31      79.49%   1        20.00%
linus torvalds     8       20.51%   4        80.00%
Total              39      100.00%  5        100.00%
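
For intuition, here is a userspace model of the bucket selection above (a sketch: the constants are illustrative, and hash_32() is reimplemented with the golden-ratio multiply that 4.7's <linux/hash.h> uses):

#include <stdint.h>
#include <stdio.h>

#define L1_CACHE_BYTES	64			/* illustrative cache-line size */
#define D_HASH_SHIFT	12			/* pretend 4096 buckets */
#define GOLDEN_RATIO_PRIME_32	0x9e370001u	/* assumed from <linux/hash.h> */

static unsigned int hash_32(unsigned int val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_PRIME_32) >> (32 - bits);
}

int main(void)
{
	/* The same name hash lands in different buckets under different parents. */
	unsigned int name_hash = 12345;
	unsigned long parent1 = 0x12340000ul, parent2 = 0x56780000ul;

	printf("bucket1 = %u\n", hash_32(name_hash + parent1 / L1_CACHE_BYTES, D_HASH_SHIFT));
	printf("bucket2 = %u\n", hash_32(name_hash + parent2 / L1_CACHE_BYTES, D_HASH_SHIFT));
	return 0;
}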

#define IN_LOOKUP_SHIFT 10
static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];

static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
al viro            39      100.00%  1        100.00%
Total              39      100.00%  1        100.00%

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Here we resort to our own counters instead of using generic per-cpu counters
 * for consistency with what the vfs inode code does. We are expected to harvest
 * better code and performance by having our own specialized counters.
 *
 * Please note that the loop is done over all possible CPUs, not over all online
 * CPUs. The reason for this is that we don't want to play games with CPUs going
 * on and off. If one of them goes off, we will just keep their counters.
 *
 * glommer: See cffbc8a for details, and if you ever intend to change this,
 * please update all vfs counters to match.
 */
static long get_nr_dentry(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
nick piggin        36      94.74%   1        50.00%
glauber costa      2       5.26%    1        50.00%
Total              38      100.00%  2        100.00%


static long get_nr_dentry_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	return sum < 0 ? 0 : sum;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
dave chinner       38      100.00%  1        100.00%
Total              38      100.00%  1        100.00%


int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
christoph hellwig  43      79.63%   1        20.00%
dave chinner       7       12.96%   1        20.00%
nick piggin        2       3.70%    1        20.00%
joe perches        1       1.85%    1        20.00%
glauber costa      1       1.85%    1        20.00%
Total              54      100.00%  5        100.00%
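
These counters reach userspace through /proc/sys/fs/dentry-state, which prints the six longs of struct dentry_stat_t (nr_dentry, nr_unused, age_limit, want_pages, and two unused fields). A small userspace reader as illustration:

#include <stdio.h>

int main(void)
{
	long nr_dentry, nr_unused, age_limit, want_pages, unused1, unused2;
	FILE *f = fopen("/proc/sys/fs/dentry-state", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%ld %ld %ld %ld %ld %ld", &nr_dentry, &nr_unused,
		   &age_limit, &want_pages, &unused1, &unused2) == 6)
		printf("dentries: %ld total, %ld unused\n", nr_dentry, nr_unused);
	fclose(f);
	return 0;
}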

#endif

/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>
/*
 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
 * aligned allocation for this particular component. We don't
 * strictly need the load_unaligned_zeropad() safety, but it
 * doesn't hurt either.
 *
 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 * need the careful unaligned handling.
 */
static inline int dentry_string_cmp(const unsigned char *cs,
				const unsigned char *ct,
				unsigned tcount)
{
	unsigned long a, b, mask;

	for (;;) {
		a = *(unsigned long *)cs;
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
linus torvalds     129     97.73%   5        83.33%
will deacon        3       2.27%    1        16.67%
Total              132     100.00%  6        100.00%
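
On a little-endian machine, the final masked compare works as in this userspace model (a sketch; bytemask_from_count() is rewritten here with its little-endian definition, and the byte count is assumed smaller than a word, as it is when the loop above breaks):

#include <stdio.h>

/* little-endian definition: keep only the low 'cnt' bytes */
static unsigned long bytemask_from_count(unsigned cnt)
{
	return ~(~0ul << (cnt * 8));
}

int main(void)
{
	unsigned long a = 0x006f6f66ul;	/* "foo" plus NUL padding */
	unsigned long b = 0x00626f66ul;	/* "fob" plus NUL padding */
	unsigned long mask = bytemask_from_count(3);

	/* prints 1 (mismatch): the strings differ in their third byte */
	printf("%d\n", !!((a ^ b) & mask));
	return 0;
}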

#else
static inline int dentry_string_cmp(const unsigned char *cs,
				const unsigned char *ct,
				unsigned tcount)
{
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
linus torvalds     53      100.00%  4        100.00%
Total              53      100.00%  4        100.00%

#endif
static inline int dentry_cmp(const struct dentry *dentry,
				const unsigned char *ct, unsigned tcount)
{
	const unsigned char *cs;
	/*
	 * Be careful about RCU walk racing with rename:
	 * use ACCESS_ONCE to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * RCU walk will check the sequence count eventually,
	 * and catch it. And we won't overrun the buffer,
	 * because we're reading the name pointer atomically,
	 * and a dentry name is guaranteed to be properly
	 * terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 */
	cs = ACCESS_ONCE(dentry->d_name.name);
	smp_read_barrier_depends();
	return dentry_string_cmp(cs, ct, tcount);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
linus torvalds     53      100.00%  2        100.00%
Total              53      100.00%  2        100.00%

struct external_name {
	union {
		atomic_t count;
		struct rcu_head head;
	} u;
	unsigned char name[];
};

static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
al viro            32      100.00%  1        100.00%
Total              32      100.00%  1        100.00%


static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
christoph hellwig  19      54.29%   1        33.33%
eric dumazet       11      31.43%   1        33.33%
al viro            5       14.29%   1        33.33%
Total              35      100.00%  3        100.00%


static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kfree(external_name(dentry));
	kmem_cache_free(dentry_cache, dentry);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
al viro            30      69.77%   1        50.00%
eric dumazet       13      30.23%   1        50.00%
Total              43      100.00%  2        100.00%


static inline int dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
al viro            24      100.00%  1        100.00%
Total              24      100.00%  1        100.00%
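
The external-name machinery only kicks in for long names; short ones live in the dentry's own d_iname array. A sketch of the decision (the helper is hypothetical; DNAME_INLINE_LEN is the real inline capacity from <linux/dcache.h>, which includes room for the trailing NUL):

/* Hypothetical helper: would this name fit inside the dentry itself? */
static inline bool name_fits_inline(const struct qstr *name)
{
	return name->len < DNAME_INLINE_LEN;
}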


static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;

	dentry->d_inode = inode;
	flags = READ_ONCE(dentry->d_flags);
	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	flags |= type_flags;
	WRITE_ONCE(dentry->d_flags, flags);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
david howells      60      100.00%  1        100.00%
Total              60      100.00%  1        100.00%


static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
	unsigned flags = READ_ONCE(dentry->d_flags);

	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	WRITE_ONCE(dentry->d_flags, flags);
	dentry->d_inode = NULL;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
david howells      46      100.00%  1        100.00%
Total              46      100.00%  1        100.00%


static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
al viro            112     100.00%  3        100.00%
Total              112     100.00%  3        100.00%

/**
 * dentry_rcuwalk_invalidate - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 *
 * After this call, in-progress rcu-walk path lookup will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).
 */
static inline void dentry_rcuwalk_invalidate(struct dentry *dentry)
{
	lockdep_assert_held(&dentry->d_lock);
	/* Go through an invalidation barrier */
	write_seqcount_invalidate(&dentry->d_seq);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
nick piggin        25      86.21%   1        50.00%
peter zijlstra     4       13.79%   1        50.00%
Total              29      100.00%  2        100.00%
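
The read side that this invalidation defeats looks roughly like the following (a simplified sketch, not the actual __d_lookup_rcu(): an rcu-walk reader samples d_seq, performs its speculative reads, and re-checks the count; write_seqcount_invalidate() bumps the count so the re-check fails):

/* Hypothetical reader: returns true if the sampled name is trustworthy. */
static bool sample_name_seq(struct dentry *dentry, struct qstr *name)
{
	unsigned seq;

	seq = raw_seqcount_begin(&dentry->d_seq);
	*name = dentry->d_name;		/* speculative read */
	return !read_seqcount_retry(&dentry->d_seq, seq);
}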

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. Dentry has no refcount
 * and is unhashed.
 */
static void dentry_iput(struct dentry *dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		__d_clear_type_and_inode(dentry);
		hlist_del_init(&dentry->d_u.d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
	}
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
pre-git            68      55.28%   3        25.00%
andrew morton      18      14.63%   1        8.33%
nick piggin        11      8.94%    2        16.67%
miklos szeredi     8       6.50%    1        8.33%
linus torvalds     7       5.69%    1        8.33%
john mccutchan     5       4.07%    1        8.33%
al viro            3       2.44%    2        16.67%
david howells      3       2.44%    1        8.33%
Total              123     100.00%  12       100.00%

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. dentry remains in-use.
 */
static void dentry_unlink_inode(struct dentry *dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;

	raw_write_seqcount_begin(&dentry->d_seq);
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	raw_write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
nick piggin        102     83.61%   2        28.57%
al viro            15      12.30%   3        42.86%
david howells      5       4.10%    2        28.57%
Total              122     100.00%  7        100.00%

/*
 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 * is in use - which includes both the "real" per-superblock
 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 *
 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 * on the shrink list (ie not on the superblock LRU list).
 *
 * The per-cpu "nr_dentry_unused" counters are updated with
 * the DCACHE_LRU_LIST bit.
 *
 * These helper functions make sure we always follow the
 * rules. d_lock must be held by the caller.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))

static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
linus torvalds     21      43.75%   2        33.33%
kentaro makita     20      41.67%   1        16.67%
nick piggin        5       10.42%   1        16.67%
christoph hellwig  1       2.08%    1        16.67%
dave chinner       1       2.08%    1        16.67%
Total              48      100.00%  6        100.00%


static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
linus torvalds     41      83.67%   2        40.00%
nick piggin        5       10.20%   1        20.00%
dave chinner       2       4.08%    1        20.00%
miklos szeredi     1       2.04%    1        20.00%
Total              49      100.00%  5        100.00%


static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
linus torvalds     21      47.73%   2        50.00%
kentaro makita     16      36.36%   1        25.00%
dave chinner       7       15.91%   1        25.00%
Total              44      100.00%  4        100.00%


static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
linus torvalds     41      89.13%   1        50.00%
kentaro makita     5       10.87%   1        50.00%
Total              46      100.00%  2        100.00%

/*
 * These can only be called under the global LRU lock, ie during the
 * callback for freeing the LRU list. "isolate" removes it from the
 * LRU lists entirely, while shrink_move moves it to the indicated
 * private list.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	list_lru_isolate(lru, &dentry->d_lru);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
linus torvalds     37      82.22%   1        50.00%
vladimir davydov   8       17.78%   1        50.00%
Total              45      100.00%  2        100.00%


static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
			      struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	list_lru_isolate_move(lru, &dentry->d_lru, list);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
linus torvalds     33      71.74%   1        33.33%
vladimir davydov   8       17.39%   1        33.33%
christoph hellwig  5       10.87%   1        33.33%
Total              46      100.00%  3        100.00%

/*
 * dentry_lru_add() must be called with d_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
linus torvalds     25      83.33%   1        25.00%
nick piggin        3       10.00%   1        25.00%
christoph hellwig  1       3.33%    1        25.00%
dave chinner       1       3.33%    1        25.00%
Total              30      100.00%  4        100.00%

/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		/*
		 * Hashed dentries are normally on the dentry hashtable,
		 * with the exception of those newly allocated by
		 * d_obtain_alias, which are always IS_ROOT:
		 */
		if (unlikely(IS_ROOT(dentry)))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_parent, dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;
		hlist_bl_unlock(b);
		dentry_rcuwalk_invalidate(dentry);
	}
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
nick piggin        64      69.57%   3        33.33%
linus torvalds     20      21.74%   2        22.22%
j. bruce fields    4       4.35%    1        11.11%
christoph hellwig  2       2.17%    1        11.11%
al viro            1       1.09%    1        11.11%
peter zijlstra     1       1.09%    1        11.11%
Total              92      100.00%  9        100.00%

EXPORT_SYMBOL(__d_drop);
void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
nick piggin        31      100.00%  1        100.00%
Total              31      100.00%  1        100.00%

EXPORT_SYMBOL(d_drop);
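
Typical callers look like the following (a hypothetical helper, not from dcache.c, illustrating only the locking split: __d_drop() for callers already holding d_lock, d_drop() otherwise):

/* Hypothetical: drop a dentry we have decided is stale. */
static void example_invalidate(struct dentry *dentry, bool stale)
{
	spin_lock(&dentry->d_lock);
	if (stale)
		__d_drop(dentry);	/* d_lock already held */
	spin_unlock(&dentry->d_lock);
}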
static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
{
	struct dentry *next;
	/*
	 * Inform d_walk() and shrink_dentry_list() that we are no longer
	 * attached to the dentry tree
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (unlikely(list_empty(&dentry->d_child)))
		return;
	__list_del_entry(&dentry->d_child);
	/*
	 * Cursors can move around the list of children. While we'd been
	 * a normal list member, it didn't matter - ->d_child.next would've
	 * been updated. However, from now on it won't be and for the
	 * things like d_walk() it might end up with a nasty surprise.
	 * Normally d_walk() doesn't care about cursors moving around -
	 * ->d_lock on parent prevents that and since a cursor has no children
	 * of its own, we get through it without ever unlocking the parent.
	 * There is one exception, though - if we ascend from a child that
	 * gets killed as soon as we unlock it, the next sibling is found
	 * using the value left in its ->d_child.next. And if _that_
	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
	 * before d_walk() regains parent->d_lock, we'll end up skipping
	 * everything the cursor had been moved past.
	 *
	 * Solution: make sure that the pointer left behind in ->d_child.next
	 * points to something that won't be moving around. I.e. skip the
	 * cursors.
	 */
	while (dentry->d_child.next != &parent->d_subdirs) {
		next = list_entry(dentry->d_child.next, struct dentry, d_child);
		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
			break;
		dentry->d_child.next = next->d_child.next;
	}
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
al viro            110     100.00%  1        100.00%
Total              110     100.00%  1        100.00%


static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/*
	 * The dentry is now unrecoverably dead to the world.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	dentry_unlist(dentry, parent);
	if (parent)
		spin_unlock(&parent->d_lock);
	dentry_iput(dentry);
	/*
	 * dentry_iput drops the locks, at which point nobody (except
	 * transient RCU lookups) can reach this dentry.
	 */
	BUG_ON(dentry->d_lockref.count > 0);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
al viro            139     67.48%   6        60.00%
nick piggin        43      20.87%   1        10.00%
zheng yan          18      8.74%    1        10.00%
linus torvalds     5       2.43%    1        10.00%
waiman long        1       0.49%    1        10.00%
Total              206     100.00%  10       100.00%

/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * If ref is non-zero, then decrement the refcount too.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto failed;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			if (inode)
				spin_unlock(&inode->i_lock);
			goto failed;
		}
	}

	__dentry_kill(dentry);
	return parent;

failed:
	spin_unlock(&dentry->d_lock);
	cpu_relax();
	return dentry; /* try again with same dentry */
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
al viro            124     98.41%   2        66.67%
nick piggin        2       1.59%    1        33.33%
Total              126     100.00%  3        100.00%


static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (unlikely(dentry->d_lockref.count < 0))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = ACCESS_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * We can't blindly lock dentry until we are sure
	 * that we won't violate the locking order.
	 * Any changes of dentry->d_parent must have
	 * been done with parent->d_lock held, so
	 * spin_lock() above is enough of a barrier
	 * for checking if it's still our child.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
al viro            144     97.96%   2        66.67%
linus torvalds     3       2.04%    1        33.33%
Total              147     100.00%  3        100.00%

/*
 * Try to do a lockless dput(), and return whether that was successful.
 *
 * If unsuccessful, we return false, having already taken the dentry lock.
 *
 * The caller needs to hold the RCU read lock, so that the dentry is
 * guaranteed to stay around even if the refcount goes down to zero!
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;
	unsigned int d_flags;

	/*
	 * If we have a d_op->d_delete() operation, we should not
	 * let the dentry count go to zero, so use "put_or_lock".
	 */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
		return lockref_put_or_lock(&dentry->d_lockref);

	/*
	 * .. otherwise, we can try to just decrement the
	 * lockref optimistically.
	 */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * If the lockref_put_return() failed due to the lock being held
	 * by somebody else, the fast path has failed. We will need to
	 * get the lock, and then check the count again.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_lockref.count > 1) {
			dentry->d_lockref.count--;
			spin_unlock(&dentry->d_lock);
			return 1;
		}
		return 0;
	}

	/*
	 * If we weren't the last ref, we're done.
	 */
	if (ret)
		return 1;

	/*
	 * Careful, careful. The reference count went down
	 * to zero, but we don't hold the dentry lock, so
	 * somebody else could get it again, and do another
	 * dput(), and we need to not race with that.
	 *
	 * However, there is a very special and common case
	 * where we don't care, because there is nothing to
	 * do: the dentry is still hashed, it does not have
	 * a 'delete' op, and it's referenced and already on
	 * the LRU list.
	 *
	 * NOTE! Since we aren't locked, these values are
	 * not "stable". However, it is sufficient that at
	 * some point after we dropped the reference the
	 * dentry was hashed and the flags had the proper
	 * value. Other dentry users may have re-gotten
	 * a reference to the dentry and change that, but
	 * our work is done - we can leave the dentry
	 * around with a zero refcount.
	 */
	smp_rmb();
	d_flags = ACCESS_ONCE(dentry->d_flags);
	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;

	/* Nothing to do? Dropping the reference was all we needed? */
	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
		return 1;

	/*
	 * Not the fast normal case? Get the lock. We've already decremented
	 * the refcount, but we'll need to re-check the situation after
	 * getting the lock.
	 */
	spin_lock(&dentry->d_lock);

	/*
	 * Did somebody else grab a reference to it in the meantime, and
	 * we're no longer the last user after all? Alternatively, somebody
	 * else could have killed it and marked it dead. Either way, we
	 * don't need to do anything else.
	 */
	if (dentry->d_lockref.count) {
		spin_unlock(&dentry->d_lock);
		return 1;
	}

	/*
	 * Re-get the reference we optimistically dropped. We hold the
	 * lock, and we just tested that it was zero, so we can just
	 * set it to 1.
	 */
	dentry->d_lockref.count = 1;
	return 0;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
linus torvalds     193     98.47%   1        50.00%
al viro            3       1.53%    1        50.00%
Total              196     100.00%  2        100.00%

/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (unlikely(!dentry))
		return;

repeat:
	rcu_read_lock();
	if (likely(fast_dput(dentry))) {
		rcu_read_unlock();
		return;
	}

	/* Slow case: now with the dentry lock held */
	rcu_read_unlock();

	WARN_ON(d_in_lookup(dentry));

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))
		goto kill_it;

	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
		goto kill_it;

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	if (!(dentry->d_flags & DCACHE_REFERENCED))
		dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_lockref.count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	dentry = dentry_kill(dentry);
	if (dentry)
		goto repeat;
}

Contributors

PersonTokensPropCommitsCommitProp
pre-gitpre-git5534.16%939.13%
linus torvaldslinus torvalds4427.33%313.04%
nick pigginnick piggin2213.66%521.74%
al viroal viro2213.66%28.70%
andrew mortonandrew morton116.83%14.35%
waiman longwaiman long42.48%14.35%
ingo molnaringo molnar21.24%14.35%
kentaro makitakentaro makita10.62%14.35%
Total161100.00%23100.00%

EXPORT_SYMBOL(dput);

/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
nick piggin        16      84.21%   3        75.00%
waiman long        3       15.79%   1        25.00%
Total              19      100.00%  4        100.00%


static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
pre-git            9       45.00%   1        20.00%
nick piggin        9       45.00%   3        60.00%
waiman long        2       10.00%   1        20.00%
Total              20      100.00%  5        100.00%


struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;

	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	rcu_read_lock();
	ret = ACCESS_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
			return ret;
		dput(ret);
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
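
The reference returned by dget_parent() must be balanced with dput(). A hypothetical caller (not from dcache.c; the %pd printk specifier prints a dentry's name):

/* Hypothetical: log the parent's name, then release the reference. */
static void show_parent(struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry);

	pr_info("parent of %pd is %pd\n", dentry, parent);
	dput(parent);
}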