Release 4.7 fs/namespace.c
/*
* linux/fs/namespace.c
*
* (C) Copyright Al Viro 2000, 2001
* Released under GPL v2.
*
* Based on code from fs/super.c, copyright Linus Torvalds and others.
* Heavily rewritten.
*/
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/idr.h>
#include <linux/init.h> /* init_rootfs */
#include <linux/fs_struct.h> /* get_fs_root et al. */
#include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/bootmem.h>
#include <linux/task_work.h>
#include "pnode.h"
#include "internal.h"
static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;
static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
if (!str)
return 0;
mhash_entries = simple_strtoul(str, &str, 0);
return 1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| al viro | 34 | 100.00% | 1 | 100.00% |
| Total | 34 | 100.00% | 1 | 100.00% |
__setup("mhash_entries=", set_mhash_entries);
static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
if (!str)
return 0;
mphash_entries = simple_strtoul(str, &str, 0);
return 1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| al viro | 34 | 100.00% | 1 | 100.00% |
| Total | 34 | 100.00% | 1 | 100.00% |
__setup("mphash_entries=", set_mphash_entries);
static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;
static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);
/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);
/*
* vfsmount lock may be taken for read to prevent changes to the
* vfsmount hash, ie. during mountpoint lookups or walking back
* up the tree.
*
* It should be taken for write in all cases where the vfsmount
* tree or hash is modified or when a vfsmount structure is modified.
*/
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
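/*
 * For reference, the write side of mount_lock is taken through small
 * helpers defined in fs/mount.h in this release; a sketch is shown here
 * so the lock_mount_hash()/unlock_mount_hash() calls below are
 * self-contained:
 */
static inline void lock_mount_hash(void)
{
	write_seqlock(&mount_lock);
}
static inline void unlock_mount_hash(void)
{
	write_sequnlock(&mount_lock);
}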
static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
tmp = tmp + (tmp >> m_hash_shift);
return &mount_hashtable[tmp & m_hash_mask];
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| linus torvalds | 52 | 81.25% | 1 | 33.33% |
| al viro | 12 | 18.75% | 2 | 66.67% |
| Total | 64 | 100.00% | 3 | 100.00% |
static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
tmp = tmp + (tmp >> mp_hash_shift);
return &mountpoint_hashtable[tmp & mp_hash_mask];
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| al viro | 43 | 91.49% | 1 | 33.33% |
| linus torvalds | 2 | 4.26% | 1 | 33.33% |
| eric dumazet | 2 | 4.26% | 1 | 33.33% |
| Total | 47 | 100.00% | 3 | 100.00% |
/*
* allocation is serialized by namespace_sem, but we need the spinlock to
* serialize with freeing.
*/
static int mnt_alloc_id(struct mount *mnt)
{
int res;
retry:
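	/*
	 * Preallocate IDA layer memory while we may still sleep
	 * (GFP_KERNEL); ida_get_new_above() below runs under
	 * mnt_id_lock and must not block.
	 */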
ida_pre_get(&mnt_id_ida, GFP_KERNEL);
spin_lock(&mnt_id_lock);
res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
if (!res)
mnt_id_start = mnt->mnt_id + 1;
spin_unlock(&mnt_id_lock);
if (res == -EAGAIN)
goto retry;
return res;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| miklos szeredi | 58 | 75.32% | 1 | 25.00% |
| al viro | 17 | 22.08% | 2 | 50.00% |
| nick piggin | 2 | 2.60% | 1 | 25.00% |
| Total | 77 | 100.00% | 4 | 100.00% |
static void mnt_free_id(struct mount *mnt)
{
int id = mnt->mnt_id;
spin_lock(&mnt_id_lock);
ida_remove(&mnt_id_ida, id);
if (mnt_id_start > id)
mnt_id_start = id;
spin_unlock(&mnt_id_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| miklos szeredi | 27 | 56.25% | 1 | 25.00% |
| al viro | 19 | 39.58% | 2 | 50.00% |
| nick piggin | 2 | 4.17% | 1 | 25.00% |
| Total | 48 | 100.00% | 4 | 100.00% |
/*
* Allocate a new peer group ID
*
* mnt_group_ida is protected by namespace_sem
*/
static int mnt_alloc_group_id(struct mount *mnt)
{
int res;
if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
return -ENOMEM;
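	/*
	 * Unlike mnt_alloc_id(), no retry loop is needed here:
	 * namespace_sem serializes all group ID allocations, so the node
	 * preallocated by ida_pre_get() cannot be consumed by a racing
	 * allocator.
	 */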
res = ida_get_new_above(&mnt_group_ida,
mnt_group_start,
&mnt->mnt_group_id);
if (!res)
mnt_group_start = mnt->mnt_group_id + 1;
return res;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| miklos szeredi | 37 | 61.67% | 1 | 33.33% |
| al viro | 23 | 38.33% | 2 | 66.67% |
| Total | 60 | 100.00% | 3 | 100.00% |
/*
* Release a peer group ID
*/
void mnt_release_group_id(struct mount *mnt)
{
int id = mnt->mnt_group_id;
ida_remove(&mnt_group_ida, id);
if (mnt_group_start > id)
mnt_group_start = id;
mnt->mnt_group_id = 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| miklos szeredi | 22 | 53.66% | 1 | 33.33% |
| al viro | 19 | 46.34% | 2 | 66.67% |
| Total | 41 | 100.00% | 3 | 100.00% |
/*
* vfsmount lock must be held for read
*/
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
preempt_disable();
mnt->mnt_count += n;
preempt_enable();
#endif
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| nick piggin | 44 | 97.78% | 1 | 50.00% |
| al viro | 1 | 2.22% | 1 | 50.00% |
| Total | 45 | 100.00% | 2 | 100.00% |
/*
* vfsmount lock must be held for write
*/
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
unsigned int count = 0;
int cpu;
for_each_possible_cpu(cpu) {
count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
}
return count;
#else
return mnt->mnt_count;
#endif
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| nick piggin | 52 | 96.30% | 1 | 33.33% |
| al viro | 2 | 3.70% | 2 | 66.67% |
| Total | 54 | 100.00% | 3 | 100.00% |
static void drop_mountpoint(struct fs_pin *p)
{
struct mount *m = container_of(p, struct mount, mnt_umount);
dput(m->mnt_ex_mountpoint);
pin_remove(p);
mntput(&m->mnt);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| al viro | 46 | 100.00% | 1 | 100.00% |
| Total | 46 | 100.00% | 1 | 100.00% |
static struct mount *alloc_vfsmnt(const char *name)
{
struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
if (mnt) {
int err;
err = mnt_alloc_id(mnt);
if (err)
goto out_free_cache;
if (name) {
mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL);
if (!mnt->mnt_devname)
goto out_free_id;
}
#ifdef CONFIG_SMP
mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
if (!mnt->mnt_pcp)
goto out_free_devname;
this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
mnt->mnt_count = 1;
mnt->mnt_writers = 0;
#endif
INIT_HLIST_NODE(&mnt->mnt_hash);
INIT_LIST_HEAD(&mnt->mnt_child);
INIT_LIST_HEAD(&mnt->mnt_mounts);
INIT_LIST_HEAD(&mnt->mnt_list);
INIT_LIST_HEAD(&mnt->mnt_expire);
INIT_LIST_HEAD(&mnt->mnt_share);
INIT_LIST_HEAD(&mnt->mnt_slave_list);
INIT_LIST_HEAD(&mnt->mnt_slave);
INIT_HLIST_NODE(&mnt->mnt_mp_list);
#ifdef CONFIG_FSNOTIFY
INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
init_fs_pin(&mnt->mnt_umount, drop_mountpoint);
}
return mnt;
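	/*
	 * Error unwinding: the labels below are reachable only via the
	 * gotos above; control never falls through past the return.
	 */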
#ifdef CONFIG_SMP
out_free_devname:
kfree_const(mnt->mnt_devname);
#endif
out_free_id:
mnt_free_id(mnt);
out_free_cache:
kmem_cache_free(mnt_cache, mnt);
return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| nick piggin | 61 | 23.83% | 2 | 8.70% |
| linus torvalds | 55 | 21.48% | 2 | 8.70% |
| al viro | 40 | 15.62% | 7 | 30.43% |
| li zefan | 23 | 8.98% | 1 | 4.35% |
| ram pai | 21 | 8.20% | 2 | 8.70% |
| miklos szeredi | 20 | 7.81% | 2 | 8.70% |
| andreas gruenbacher | 13 | 5.08% | 1 | 4.35% |
| eric w. biederman | 8 | 3.12% | 1 | 4.35% |
| david howells | 6 | 2.34% | 1 | 4.35% |
| dave hansen | 5 | 1.95% | 1 | 4.35% |
| andrzej hajda | 2 | 0.78% | 1 | 4.35% |
| robert p. j. day | 1 | 0.39% | 1 | 4.35% |
| andries brouwer | 1 | 0.39% | 1 | 4.35% |
| Total | 256 | 100.00% | 23 | 100.00% |
/*
* Most r/o checks on a fs are for operations that take
* discrete amounts of time, like a write() or unlink().
* We must keep track of when those operations start
* (for permission checks) and when they end, so that
* we can determine when writes are able to occur to
* a filesystem.
*/
/*
* __mnt_is_readonly: check whether a mount is read-only
* @mnt: the mount to check for its write status
*
* This shouldn't be used directly outside of the VFS.
* It does not guarantee that the filesystem will stay
* r/w, just that it is right *now*. This cannot and
* should not be used in place of IS_RDONLY(inode).
* mnt_want/drop_write() will _keep_ the filesystem
* r/w.
*/
int __mnt_is_readonly(struct vfsmount *mnt)
{
if (mnt->mnt_flags & MNT_READONLY)
return 1;
if (mnt->mnt_sb->s_flags & MS_RDONLY)
return 1;
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| dave hansen | 37 | 100.00% | 2 | 100.00% |
| Total | 37 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
mnt->mnt_writers++;
#endif
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| nick piggin | 16 | 48.48% | 3 | 60.00% |
| dave hansen | 16 | 48.48% | 1 | 20.00% |
| al viro | 1 | 3.03% | 1 | 20.00% |
| Total | 33 | 100.00% | 5 | 100.00% |
static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
mnt->mnt_writers--;
#endif
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| nick piggin | 17 | 51.52% | 3 | 60.00% |
| dave hansen | 15 | 45.45% | 1 | 20.00% |
| al viro | 1 | 3.03% | 1 | 20.00% |
| Total | 33 | 100.00% | 5 | 100.00% |
static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
unsigned int count = 0;
int cpu;
for_each_possible_cpu(cpu) {
count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
}
return count;
#else
return mnt->mnt_writers;
#endif
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| nick piggin | 30 | 54.55% | 3 | 60.00% |
| dave hansen | 24 | 43.64% | 1 | 20.00% |
| al viro | 1 | 1.82% | 1 | 20.00% |
| Total | 55 | 100.00% | 5 | 100.00% |
static int mnt_is_readonly(struct vfsmount *mnt)
{
if (mnt->mnt_sb->s_readonly_remount)
return 1;
/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
smp_rmb();
return __mnt_is_readonly(mnt);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| miklos szeredi | 32 | 100.00% | 1 | 100.00% |
| Total | 32 | 100.00% | 1 | 100.00% |
/*
* Most r/o & frozen checks on a fs are for operations that take discrete
* amounts of time, like a write() or unlink(). We must keep track of when
* those operations start (for permission checks) and when they end, so that we
* can determine when writes are able to occur to a filesystem.
*/
/**
* __mnt_want_write - get write access to a mount without freeze protection
* @m: the mount on which to take a write
*
* This tells the low-level filesystem that a write is about to be performed to
* it, and makes sure that writes are allowed (the mount is read-write) before
* returning success. This operation does not protect against filesystem being
* frozen. When the write operation is finished, __mnt_drop_write() must be
* called. This is effectively a refcount.
*/
int __mnt_want_write(struct vfsmount *m)
{
struct mount *mnt = real_mount(m);
int ret = 0;
preempt_disable();
mnt_inc_writers(mnt);
/*
* The store to mnt_inc_writers must be visible before we test
* MNT_WRITE_HOLD in the loop below, so that the slowpath can see our
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
cpu_relax();
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
* be set to match its requirements. So we must not load that until
* MNT_WRITE_HOLD is cleared.
*/
smp_rmb();
if (mnt_is_readonly(m)) {
mnt_dec_writers(mnt);
ret = -EROFS;
}
preempt_enable();
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| nick piggin | 39 | 47.56% | 2 | 25.00% |
| dave hansen | 24 | 29.27% | 2 | 25.00% |
| al viro | 14 | 17.07% | 1 | 12.50% |
| miao xie | 3 | 3.66% | 1 | 12.50% |
| jan kara | 1 | 1.22% | 1 | 12.50% |
| miklos szeredi | 1 | 1.22% | 1 | 12.50% |
| Total | 82 | 100.00% | 8 | 100.00% |
/**
* mnt_want_write - get write access to a mount
* @m: the mount on which to take a write
*
* This tells the low-level filesystem that a write is about to be performed to
* it, and makes sure that writes are allowed (mount is read-write, filesystem
* is not frozen) before returning success. When the write operation is
* finished, mnt_drop_write() must be called. This is effectively a refcount.
*/
int mnt_want_write(struct vfsmount *m)
{
int ret;
sb_start_write(m->mnt_sb);
ret = __mnt_want_write(m);
if (ret)
sb_end_write(m->mnt_sb);
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| jan kara | 41 | 100.00% | 1 | 100.00% |
| Total | 41 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(mnt_want_write);
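/*
 * Hypothetical caller sketch (not part of namespace.c): a write path
 * pairs mnt_want_write() with mnt_drop_write() around the operation,
 * so both read-only and freeze protection cover the modification.
 */
static int example_unlink(struct path *dir_path, struct dentry *victim)
{
	int err = mnt_want_write(dir_path->mnt);
	if (err)
		return err;	/* -EROFS if the mount or sb is read-only */
	err = vfs_unlink(d_inode(dir_path->dentry), victim, NULL);
	mnt_drop_write(dir_path->mnt);
	return err;
}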
/**
* mnt_clone_write - get write access to a mount
* @mnt: the mount on which to take a write
*
* This is effectively like mnt_want_write, except
* it must only be used to take an extra write reference
* on a mountpoint that we already know has a write reference
* on it. This allows some optimisation.
*
* When finished, mnt_drop_write must be called as usual to
* drop the reference.
*/
int mnt_clone_write(struct vfsmount *mnt)
{
/* superblock may be r/o */
if (__mnt_is_readonly(mnt))
return -EROFS;
preempt_disable();
mnt_inc_writers(real_mount(mnt));
preempt_enable();
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| nick piggin | 36 | 92.31% | 2 | 66.67% |
| al viro | 3 | 7.69% | 1 | 33.33% |
| Total | 39 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(mnt_clone_write);
/**
* __mnt_want_write_file - get write access to a file's mount
* @file: the file whose mount will be written to
*
* This is like __mnt_want_write, but it takes a file and can
* do some optimisations if the file is open for write already.
*/
int __mnt_want_write_file(struct file *file)
{
if (!(file->f_mode & FMODE_WRITER))
return __mnt_want_write(file->f_path.mnt);
else
return mnt_clone_write(file->f_path.mnt);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| nick piggin | 39 | 92.86% | 1 | 33.33% |
| jan kara | 2 | 4.76% | 1 | 33.33% |
| al viro | 1 | 2.38% | 1 | 33.33% |
| Total | 42 | 100.00% | 3 | 100.00% |
/**
* mnt_want_write_file - get write access to a file's mount
* @file: the file whose mount will be written to
*
* This is like mnt_want_write, but it takes a file and can
* do some optimisations if the file is open for write already.
*/
int mnt_want_write_file(struct file *file)
{
int ret;
sb_start_write(file->f_path.mnt->mnt_sb);
ret = __mnt_want_write_file(file);
if (ret)
sb_end_write(file->f_path.mnt->mnt_sb);
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| jan kara | 49 | 100.00% | 1 | 100.00% |
| Total | 49 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(mnt_want_write_file);
/**
* __mnt_drop_write - give up write access to a mount
* @mnt: the mount on which to give up write access
*
* Tells the low-level filesystem that we are done
* performing writes to it. Must be matched with
* __mnt_want_write() call above.
*/
void __mnt_drop_write(struct vfsmount *mnt)
{
preempt_disable();
mnt_dec_writers(real_mount(mnt));
preempt_enable();
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| nick piggin | 14 | 58.33% | 2 | 40.00% |
| dave hansen | 6 | 25.00% | 1 | 20.00% |
| al viro | 3 | 12.50% | 1 | 20.00% |
| jan kara | 1 | 4.17% | 1 | 20.00% |
| Total | 24 | 100.00% | 5 | 100.00% |
/**
* mnt_drop_write - give up write access to a mount
* @mnt: the mount on which to give up write access
*
* Tells the low-level filesystem that we are done performing writes to it and
* also allows filesystem to be frozen again. Must be matched with
* mnt_want_write() call above.
*/
void mnt_drop_write(struct vfsmount *mnt)
{
__mnt_drop_write(mnt);
sb_end_write(mnt->mnt_sb);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| jan kara | 22 | 100.00% | 1 | 100.00% |
| Total | 22 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(mnt_drop_write);
void __mnt_drop_write_file(struct file *file)
{
__mnt_drop_write(file->f_path.mnt);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| jan kara | 19 | 100.00% | 1 | 100.00% |
| Total | 19 | 100.00% | 1 | 100.00% |
void mnt_drop_write_file(struct file *file)
{
mnt_drop_write(file->f_path.mnt);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| al viro | 19 | 100.00% | 1 | 100.00% |
| Total | 19 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(mnt_drop_write_file);
static int mnt_make_readonly(struct mount *mnt)
{
int ret = 0;
lock_mount_hash();
mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
/*
* After storing MNT_WRITE_HOLD, we'll read the counters. This store
* should be visible before we do.
*/
smp_mb();
/*
* With writers on hold, if this value is zero, then there are
* definitely no active writers (although held writers may subsequently
* increment the count, they'll have to wait, and decrement it after
* seeing MNT_READONLY).
*
* It is OK to have counter incremented on one CPU and decremented on
* another: the sum will add up correctly. The danger would be when we
* sum up each counter, if we read a counter before it is incremented,
* but then read another CPU's count which it has been subsequently
* decremented from -- we would see more decrements than we should.
* MNT_WRITE_HOLD protects against this scenario, because
* mnt_want_write first increments count, then smp_mb, then spins on
* MNT_WRITE_HOLD, so it can't be decremented by another CPU while
* we're counting up here.
*/
if (mnt_get_writers(mnt) > 0)
ret = -EBUSY;
else
mnt->mnt.mnt_flags |= MNT_READONLY;
/*
* MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
* that become unheld will see MNT_READONLY.
*/
smp_wmb();
mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
unlock_mount_hash();
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| dave hansen | 40 | 54.05% | 3 | 42.86% |
| nick piggin | 23 | 31.08% | 2 | 28.57% |
| al viro | 11 | 14.86% | 2 | 28.57% |
| Total | 74 | 100.00% | 7 | 100.00% |
static void __mnt_unmake_readonly(struct mount *mnt)
{
lock_mount_hash();
mnt->mnt.mnt_flags &= ~MNT_READONLY;
unlock_mount_hash();
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| dave hansen | 19 | 73.08% | 1 | 33.33% |
| al viro | 7 | 26.92% | 2 | 66.67% |
| Total | 26 | 100.00% | 3 | 100.00% |
int sb_prepare_remount_readonly(struct super_block *sb)
{
struct mount *mnt;
int err = 0;
/* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */
if (atomic_long_read(&sb->s_remove_count))
return -EBUSY;
lock_mount_hash();
list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
smp_mb();
if (mnt_get_writers(mnt) > 0) {
err = -EBUSY;
break;
}
}
}
if (!err && atomic_long_read(&sb->s_remove_count))
err = -EBUSY;
if (!err) {
sb->s_readonly_remount = 1;
smp_wmb();
}
list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
}
unlock_mount_hash();
return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| miklos szeredi | 156 | 97.50% | 2 | 66.67% |
| al viro | 4 | 2.50% | 1 | 33.33% |
| Total | 160 | 100.00% | 3 | 100.00% |
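/*
 * For context (outside this file): do_remount_sb() in fs/super.c calls
 * sb_prepare_remount_readonly() on a non-forced read-only remount;
 * s_readonly_remount is cleared again once the remount completes or is
 * cancelled, re-enabling mnt_want_write() on the superblock's mounts.
 */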
static void free_vfsmnt(struct mount *mnt)
{
kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
free_percpu(mnt->mnt_pcp);
#endif
kmem_cache_free(mnt_cache, mnt);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| linus torvalds | 21 | 56.76% | 1 | 16.67% |
| nick piggin | 12 | 32.43% | 2 | 33.33% |
| al viro | 3 | 8.11% | 2 | 33.33% |
| andrzej hajda | 1 | 2.70% | 1 | 16.67% |
| Total | 37 | 100.00% | 6 | 100.00% |
static void delayed_free_vfsmnt(struct rcu_head *head)
{
free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| david howells | 24 | 100.00% | 1 | 100.00% |
| Total | 24 | 100.00% | 1 | 100.00% |
/* call under rcu_read_lock */
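/*
 * Return values: 0 if a reference was taken; 1 if the caller should
 * simply retry (nothing to undo); -1 if we pinned a mount that is
 * going away, in which case the caller must mntput() it.
 */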
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
struct mount *mnt;
if (read_seqretry(&mount_lock, seq))
return 1;
if (bastard == NULL)
return 0;
mnt = real_mount(bastard);
mnt_add_count(mnt, 1);
if (likely(!read_seqretry(&mount_lock, seq)))
return 0;
if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
mnt_add_count(mnt, -1);
return 1;
}
return -1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| al viro | 96 | 100.00% | 2 | 100.00% |
| Total | 96 | 100.00% | 2 | 100.00% |
/* call under rcu_read_lock */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
int res = __legitimize_mnt(bastard, seq);
if (likely(!res))
return true;
if (unlikely(res < 0)) {
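		/*
		 * mntput() may sleep and free the mount, so it cannot run
		 * under the RCU read lock; drop it around the call. The
		 * caller's loop retries under a fresh read_seqbegin().
		 */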
rcu_read_unlock();
mntput(bastard);
rcu_read_lock();
}
return false;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| al viro | 59 | 100.00% | 2 | 100.00% |
| Total | 59 | 100.00% | 2 | 100.00% |
/*
* find the first mount at @dentry on vfsmount @mnt.
* call under rcu_read_lock()
*/
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
struct hlist_head *head = m_hash(mnt, dentry);
struct mount *p;
hlist_for_each_entry_rcu(p, head, mnt_hash)
if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
return p;
return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| linus torvalds | 33 | 50.77% | 1 | 16.67% |
| al viro | 30 | 46.15% | 4 | 66.67% |
| ram pai | 2 | 3.08% | 1 | 16.67% |
| Total | 65 | 100.00% | 6 | 100.00% |
/*
* find the last mount at @dentry on vfsmount @mnt.
* mount_lock must be held.
*/
struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
{
struct mount *p, *res = NULL;
p = __lookup_mnt(mnt, dentry);
if (!p)
goto out;
if (!(p->mnt.mnt_flags & MNT_UMOUNT))
res = p;
hlist_for_each_entry_continue(p, mnt_hash) {
if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
break;
if (!(p->mnt.mnt_flags & MNT_UMOUNT))
res = p;
}
out:
return res;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| al viro | 69 | 63.30% | 4 | 57.14% |
| eric w. biederman | 32 | 29.36% | 1 | 14.29% |
| linus torvalds | 5 | 4.59% | 1 | 14.29% |
| ram pai | 3 | 2.75% | 1 | 14.29% |
| Total | 109 | 100.00% | 7 | 100.00% |
/*
* lookup_mnt - Return the first child mount mounted at path
*
* "First" means first mounted chronologically. If you create the
* following mounts:
*
* mount /dev/sda1 /mnt
* mount /dev/sda2 /mnt
* mount /dev/sda3 /mnt
*
* Then lookup_mnt() on the base /mnt dentry in the root mount will
* return successively the root dentry and vfsmount of /dev/sda1, then
* /dev/sda2, then /dev/sda3, then NULL.
*
* lookup_mnt takes a reference to the found vfsmount.
*/
struct vfsmount *lookup_mnt(struct path *path)
{
struct mount *child_mnt;
struct vfsmount *m;
unsigned seq;
rcu_read_lock();
do {
seq = read_seqbegin(&mount_lock);
child_mnt = __lookup_mnt(path->mnt, path->dentry);
m = child_mnt ? &child_mnt->mnt : NULL;
} while (!legitimize_mnt(m, seq));
rcu_read_unlock();
return m;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| al viro | 51 | 63.75% | 3 | 50.00% |
| ram pai | 26 | 32.50% | 1 | 16.67% |
| linus torvalds | 2 | 2.50% | 1 | 16.67% |
| andi kleen | 1 | 1.25% | 1 | 16.67% |
| Total | 80 | 100.00% | 6 | 100.00% |
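/*
 * Hypothetical usage sketch (not part of namespace.c): the caller owns
 * the reference lookup_mnt() took and must drop it with mntput().
 */
static void example_peek_mounted(struct path *path)
{
	struct vfsmount *m = lookup_mnt(path);
	if (m) {
		/* ... inspect the child mount ... */
		mntput(m);
	}
}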
/*
* __is_local_mountpoint - Test to see if dentry is a mountpoint in the
* current mount namespace.
*
* The common case is dentries are not mountpoints at all and that
* test is handled inline. For the slow case when we are actually
* dealing with a mountpoint of some kind, walk through all of the
* mounts in the current mount namespace and test to see if the dentry
* is a mountpoint.
*
* The mount_hashtable is not usable in this context because we
* need to identify all mounts that may be in the current mount
* namespace, not just a mount that happens to have some specified
* parent mount.
*/
bool __is_local_mountpoint(struct dentry *dentry)
{
struct mnt_namespace *ns = current->nsproxy->mnt_ns;
struct mount *mnt;
bool is_covered = false;
if (!d_mountpoint(dentry))
goto out;
down_read(&namespace_sem);
list_for_each_entry(mnt, &ns->list, mnt_list) {
is_covered = (mnt->mnt_mountpoint == dentry);
if (is_covered)
break;
}
up_read(&namespace_sem);
out:
return is_covered;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| eric w. biederman | 84 | 100.00% | 1 | 100.00% |
| Total | 84 | 100.00% | 1 | 100.00% |
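/*
 * The inline fast path referred to above lives in fs/mount.h in this
 * release; shown here as a sketch for completeness:
 */
static inline bool is_local_mountpoint(struct dentry *dentry)
{
	if (!d_mountpoint(dentry))
		return false;
	return __is_local_mountpoint(dentry);
}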
static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
struct hlist_head *chain = mp_hash(dentry);
struct mountpoint *mp;
hlist_for_each_entry(mp, chain, m_hash) {
if (mp->m_dentry == dentry) {
/* might be worth a WARN_ON() */
if (d_unlinked(dentry))
return ERR_PTR(-ENOENT);
mp->m_count++;
return mp;
}
}
return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| al viro | 69 | 93.24% | 2 | 66.67% |
| eric w. biederman | 5 | 6.76% | 1 | 33.33% |
| Total | 74 | 100.00% | 3 | 100.00% |
static struct mountpoint *new_mountpoint(struct dentry *dentry)
{
struct hlist_head *chain = mp_hash(dentry);
struct mountpoint *mp;
int ret;
mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
if (!mp)
return ERR_PTR(-ENOMEM);
ret = d_set_mounted(dentry);
if (ret) {
kfree(mp);
return ERR_PTR(ret);
}
mp->m_dentry = dentry;
mp->m_count = 1;
hlist_add_head(&mp->m_hash, chain);
INIT_HLIST_HEAD(&mp->m_list);
return mp;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| al viro | 70 | 61.95% | 2 | 40.00% |
| eric w. biederman | 38 | 33.63% | 2 | 40.00% |
| miklos szeredi | 5 | 4.42% | 1 | 20.00% |
| Total | 113 | 100.00% | 5 | 100.00% |
static void put_mountpoint(struct mountpoint *mp)
{
if (!--mp->m_count) {
struct dentry *dentry = mp->m_dentry;
BUG_ON(!hlist_empty(&mp->m_list));
spin_lock(&dentry->d_lock);
dentry->d_flags &= ~DCACHE_MOUNTED;
spin_unlock(&dentry->d_lock);
hlist_del(&mp->m_hash);
kfree(mp);
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| al viro | 66 | 84.62% | 2 | 66.67% |
| eric w. biederman | 12 | 15.38% | 1 | 33.33% |
| Total | 78 | 100.00% | 3 | 100.00% |
static inline int check_mnt(struct mount *mnt)
{
return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| linus torvalds | 16 | 69.57% | 2 | 33.33% |
| david howells | 2 | 8.70% | 1 | 16.67% |
| serge hallyn | 2 | 8.70% | 1 | 16.67% |
| kirill korotaev | 2 | 8.70% | 1 | 16.67% |
| al viro | 1 | 4.35% | 1 | 16.67% |
| Total | 23 | 100.00% | 6 | 100.00% |
/*
* vfsmount lock must be held for write
*/
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
if (ns) {
ns->event = ++event;
wake_up_interruptible(&ns->poll);
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| al viro | 30 | 93.75% | 1 | 50.00% |
| kirill korotaev | 2 | 6.25% | 1 | 50.00% |
| Total | 32 | 100.00% | 2 | 100.00% |
/*
* vfsmount lock must be held for write
*/
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
if (ns && ns->event != event) {
ns->event = event;
wake_up_interruptible(&ns->poll);
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| al viro | 35 | 94.59% | 1 | 50.00% |
| kirill korotaev | 2 | 5.41% | 1 | 50.00% |
| Total | 37 | 100.00% | 2 | 100.00% |
/*
* vfsmount lock must be held for write
*/
static void unhash_mnt(struct mount *mnt)
{
mnt->mnt_parent = mnt;
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
list_del_init(&mnt->mnt_child);
hlist_del_init_rcu(&mnt->mnt_hash);
hlist_del_init(&mnt->mnt_mp_list);
put_mountpoint(mnt->mnt_mp);
mnt->mnt_mp = NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| linus torvalds | 33 | 51.56% | 1 | 14.29% |
| al viro | 13 | 20.31% | 3 | 42.86% |
| eric w. biederman | 9 | 14.06% | 2 | 28.57% |
| nick piggin | 9 | 14.06% | 1 | 14.29% |
| Total | 64 | 100.00% | 7 | 100.00% |
/*
* vfsmount lock must be held for write
*/
static void detach_mnt(struct mount *mnt, struct path *old_path)
{
old_path->dentry = mnt->mnt_mountpoint;
old_path->mnt = &mnt->mnt_parent->mnt;
unhash_mnt(mnt);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| eric w. biederman | 40 | 100.00% | 1 | 100.00% |
| Total | 40 | 100.00% | 1 | 100.00% |
/*
* vfsmount lock must be held for write
*/
static void umount_mnt(struct mount *mnt)
{
/* old mountpoint will be dropped when we can do that */
mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
unhash_mnt(mnt);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| eric w. biederman | 25 | 100.00% | 1 | 100.00% |
| Total | 25 | 100.00% | 1 | 100.00% |
/*
* vfsmount lock must be held for write
*/
void mnt_set_mountpoint(struct mount *