Release 4.7 fs/locks.c
/*
* linux/fs/locks.c
*
* Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
* Doug Evans (dje@spiff.uucp), August 07, 1992
*
* Deadlock detection added.
* FIXME: one thing isn't handled yet:
* - mandatory locks (requires lots of changes elsewhere)
* Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
*
* Miscellaneous edits, and a total rewrite of posix_lock_file() code.
* Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
*
* Converted file_lock_table to a linked list from an array, which eliminates
* the limits on how many active file locks are open.
* Chad Page (pageone@netcom.com), November 27, 1994
*
* Removed dependency on file descriptors. dup()'ed file descriptors now
* get the same locks as the original file descriptors, and a close() on
* any file descriptor removes ALL the locks on the file for the current
* process. Since locks still depend on the process id, locks are inherited
* after an exec() but not after a fork(). This agrees with POSIX, and both
* BSD and SVR4 practice.
* Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
*
* Scrapped free list which is redundant now that we allocate locks
* dynamically with kmalloc()/kfree().
* Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
*
* Implemented two lock personalities - FL_FLOCK and FL_POSIX.
*
* FL_POSIX locks are created with calls to fcntl() and lockf(), both of
* which go through the fcntl() system call. They have the semantics
* described above.
*
* FL_FLOCK locks are created with calls to flock(), through the flock()
* system call, which is new. Old C libraries implement flock() via fcntl()
* and will continue to use the old, broken implementation.
*
* FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
* with a file pointer (filp). As a result they can be shared by a parent
* process and its children after a fork(). They are removed when the last
* file descriptor referring to the file pointer is closed (unless explicitly
* unlocked).
*
* FL_FLOCK locks never deadlock; an existing lock is always removed before
* upgrading from shared to exclusive (or vice versa). When this happens,
* any processes blocked by the current lock are woken up and allowed to
* run before the new lock is applied.
* Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
*
* Removed some race conditions in flock_lock_file(), marked other possible
* races. Just grep for FIXME to see them.
* Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
*
* Addressed Dmitry's concerns. Deadlock checking no longer recursive.
* Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
* once we've checked for blocking and deadlocking.
* Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
*
* Initial implementation of mandatory locks. SunOS turned out to be
* a rotten model, so I implemented the "obvious" semantics.
* See 'Documentation/filesystems/mandatory-locking.txt' for details.
* Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
*
* Don't allow mandatory locks on mmap()'ed files. Added simple functions to
* check if a file has mandatory locks, used by mmap(), open() and creat() to
* see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
* Manual, Section 2.
* Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
*
* Tidied up block list handling. Added '/proc/locks' interface.
* Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
*
* Fixed deadlock condition for pathological code that mixes calls to
* flock() and fcntl().
* Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
*
* Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
* for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
* guarantee sensible behaviour in the case where file system modules might
* be compiled with different options than the kernel itself.
* Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
*
* Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
* (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
* Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
*
* Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
* locks. Changed process synchronisation to avoid dereferencing locks that
* have already been freed.
* Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
*
* Made the block list a circular list to minimise searching in the list.
* Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
*
* Made mandatory locking a mount option. Default is not to allow mandatory
* locking.
* Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
*
* Some adaptations for NFS support.
* Olaf Kirch (okir@monad.swb.de), Dec 1996.
*
* Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
* Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
*
* Use slab allocator instead of kmalloc/kfree.
* Use generic list implementation from <linux/list.h>.
* Sped up posix_locks_deadlock by only considering blocked locks.
* Matthew Wilcox <willy@debian.org>, March, 2000.
*
* Leases and LOCK_MAND
* Matthew Wilcox <willy@debian.org>, June, 2000.
* Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
*/
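To make the two lock personalities above concrete, here is a minimal userspace sketch (not part of this file; the path and error handling are illustrative only) that takes a POSIX record lock with fcntl() and a BSD-style lock with flock() on the same descriptor:
/* Hypothetical userspace demo, not part of fs/locks.c. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/locks-demo", O_RDWR | O_CREAT, 0644);
	if (fd < 0)
		return 1;

	/* FL_POSIX: a record lock on bytes 0..9, owned by this process. */
	struct flock fl = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 10,
	};
	if (fcntl(fd, F_SETLK, &fl) == -1)
		perror("fcntl(F_SETLK)");

	/* FL_FLOCK: a whole-file lock owned by the open file description,
	 * so it is shared across fork() and dup()'ed descriptors. */
	if (flock(fd, LOCK_EX | LOCK_NB) == -1)
		perror("flock(LOCK_EX)");

	close(fd);	/* drops the POSIX lock, and the flock lock too,
			 * since this is the last fd for the description */
	return 0;
}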
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>
#include <linux/lglock.h>
#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>
#include <asm/uaccess.h>
#define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK)
static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| j. bruce fields | 22 | 100.00% | 3 | 100.00% |
| Total | 22 | 100.00% | 3 | 100.00% |
static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| j. bruce fields | 38 | 100.00% | 2 | 100.00% |
| Total | 38 | 100.00% | 2 | 100.00% |
int leases_enable = 1;
int lease_break_time = 45;
/*
* The global file_lock_list is only used for displaying /proc/locks, so we
* keep a list on each CPU, with each list protected by its own spinlock via
* the file_lock_lglock. Note that alterations to the list also require that
* the relevant flc_lock is held.
*/
DEFINE_STATIC_LGLOCK(file_lock_lglock);
static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
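For illustration, a reader that wants a coherent view of every per-CPU list (as the /proc/locks code later in this file does) must hold all of the per-CPU spinlocks at once, which lg_global_lock() takes in one call. A sketch of that pattern, as a hypothetical helper:
/* Illustrative only: how a reader would walk every per-CPU list. */
static void walk_all_file_locks(void)	/* hypothetical helper */
{
	struct file_lock *fl;
	int cpu;

	lg_global_lock(&file_lock_lglock);	/* take all per-CPU spinlocks */
	for_each_possible_cpu(cpu) {
		hlist_for_each_entry(fl, per_cpu_ptr(&file_lock_list, cpu),
				     fl_link) {
			/* e.g. format one /proc/locks line for fl */
		}
	}
	lg_global_unlock(&file_lock_lglock);
}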
/*
* The blocked_hash is used to find POSIX lock loops for deadlock detection.
* It is protected by blocked_lock_lock.
*
* We hash locks by lockowner in order to optimize searching for the lock a
* particular lockowner is waiting on.
*
* FIXME: make this value scale via some heuristic? We generally will want more
* buckets when we have more lockowners holding locks, but that's a little
* difficult to determine without knowing what the workload will look like.
*/
#define BLOCKED_HASH_BITS 7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
/*
* This lock protects the blocked_hash. Generally, if you're accessing it, you
* want to be holding this lock.
*
* In addition, it also protects the fl->fl_block list, and the fl->fl_next
* pointer for file_lock structures that are acting as lock requests (in
* contrast to those that are acting as records of acquired locks).
*
* Note that when we acquire this lock in order to change the above fields,
* we often hold the flc_lock as well. In certain cases, when reading the fields
* protected by this lock, we can skip acquiring it iff we already hold the
* flc_lock.
*
* In particular, adding an entry to the fl_block list requires that you hold
* both the flc_lock and the blocked_lock_lock (acquired in that order).
* Deleting an entry from the list however only requires the blocked_lock_lock.
*/
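Concretely, queueing a waiter under that ordering rule looks like the following fragment (illustrative only; the real helpers, __locks_insert_block() and its locks_insert_block() wrapper, appear later in this file):
/* Illustrative fragment of the required lock ordering. */
static void queue_waiter_example(struct file_lock_context *ctx,
				 struct file_lock *blocker,
				 struct file_lock *waiter)
{
	spin_lock(&ctx->flc_lock);		/* 1: per-inode lock first */
	spin_lock(&blocked_lock_lock);		/* 2: then the global lock */
	__locks_insert_block(blocker, waiter);	/* waiter joins blocker->fl_block */
	spin_unlock(&blocked_lock_lock);
	spin_unlock(&ctx->flc_lock);
}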
static DEFINE_SPINLOCK(blocked_lock_lock);
static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;
static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;
	/* paired with cmpxchg() below */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (likely(ctx) || type == F_UNLCK)
		goto out;
	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;
	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);
	/*
	 * Assign the pointer if it's not already assigned. If it is, then
	 * free the context we just allocated.
	 */
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = smp_load_acquire(&inode->i_flctx);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jeff layton | 109 | 76.22% | 7 | 87.50% |
| dmitriy vyukov | 34 | 23.78% | 1 | 12.50% |
| Total | 143 | 100.00% | 8 | 100.00% |
static void
locks_dump_ctx_list(struct list_head *list, char *list_type)
{
	struct file_lock *fl;
	list_for_each_entry(fl, list, fl_list) {
		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
			list_type, fl->fl_owner, fl->fl_flags, fl->fl_type,
			fl->fl_pid);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jeff layton | 53 | 100.00% | 2 | 100.00% |
| Total | 53 | 100.00% | 2 | 100.00% |
static void
locks_check_ctx_lists(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;
	if (unlikely(!list_empty(&ctx->flc_flock) ||
		     !list_empty(&ctx->flc_posix) ||
		     !list_empty(&ctx->flc_lease))) {
		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
			inode->i_ino);
		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jeff layton | 111 | 100.00% | 5 | 100.00% |
| Total | 111 | 100.00% | 5 | 100.00% |
void
locks_free_lock_context(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;
	if (unlikely(ctx)) {
		locks_check_ctx_lists(inode);
		kmem_cache_free(flctx_cache, ctx);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jeff layton | 40 | 100.00% | 3 | 100.00% |
| Total | 40 | 100.00% | 3 | 100.00% |
static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_HLIST_NODE(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_list);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| miklos szeredi | 34 | 79.07% | 2 | 50.00% |
| jeff layton | 9 | 20.93% | 2 | 50.00% |
| Total | 43 | 100.00% | 4 | 100.00% |
/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
	if (fl)
		locks_init_lock_heads(fl);
	return fl;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| miklos szeredi | 18 | 54.55% | 2 | 28.57% |
| pre-git | 13 | 39.39% | 3 | 42.86% |
| matthew wilcox | 1 | 3.03% | 1 | 14.29% |
| christoph lameter | 1 | 3.03% | 1 | 14.29% |
| Total | 33 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL_GPL(locks_alloc_lock);
void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}
	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_put_owner) {
			fl->fl_lmops->lm_put_owner(fl->fl_owner);
			fl->fl_owner = NULL;
		}
		fl->fl_lmops = NULL;
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| trond myklebust | 47 | 57.32% | 1 | 33.33% |
| kinglong mee | 25 | 30.49% | 1 | 33.33% |
| jeff layton | 10 | 12.20% | 1 | 33.33% |
| Total | 82 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(locks_release_private);
/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_list));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!hlist_unhashed(&fl->fl_link));
	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 45 | 65.22% | 2 | 28.57% |
| jeff layton | 13 | 18.84% | 2 | 28.57% |
| miklos szeredi | 6 | 8.70% | 1 | 14.29% |
| william a. adamson | 4 | 5.80% | 1 | 14.29% |
| trond myklebust | 1 | 1.45% | 1 | 14.29% |
| Total | 69 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(locks_free_lock);
static void
locks_dispose_list(struct list_head *dispose)
{
	struct file_lock *fl;
	while (!list_empty(dispose)) {
		fl = list_first_entry(dispose, struct file_lock, fl_list);
		list_del_init(&fl->fl_list);
		locks_free_lock(fl);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jeff layton | 51 | 100.00% | 2 | 100.00% |
| Total | 51 | 100.00% | 2 | 100.00% |
void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 16 | 57.14% | 1 | 33.33% |
| miklos szeredi | 12 | 42.86% | 2 | 66.67% |
| Total | 28 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(locks_init_lock);
/*
* Initialize a new lock from an existing file_lock structure.
*/
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_lmops = fl->fl_lmops;
	new->fl_ops = NULL;
	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_get_owner)
			fl->fl_lmops->lm_get_owner(fl->fl_owner);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 69 | 62.73% | 12 | 75.00% |
| kinglong mee | 31 | 28.18% | 2 | 12.50% |
| trond myklebust | 8 | 7.27% | 1 | 6.25% |
| jeff layton | 2 | 1.82% | 1 | 6.25% |
| Total | 110 | 100.00% | 16 | 100.00% |
EXPORT_SYMBOL(locks_copy_conflock);
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	/* "new" must be a freshly-initialized lock */
	WARN_ON_ONCE(new->fl_ops);
	locks_copy_conflock(new, fl);
	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| trond myklebust | 41 | 56.16% | 2 | 25.00% |
| kinglong mee | 19 | 26.03% | 2 | 25.00% |
| william a. adamson | 6 | 8.22% | 1 | 12.50% |
| jeff layton | 4 | 5.48% | 1 | 12.50% |
| pre-git | 3 | 4.11% | 2 | 25.00% |
| Total | 73 | 100.00% | 8 | 100.00% |
EXPORT_SYMBOL(locks_copy_lock);
static inline int flock_translate_cmd(int cmd) {
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| matthew wilcox | 53 | 100.00% | 1 | 100.00% |
| Total | 53 | 100.00% | 1 | 100.00% |
/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static struct file_lock *
flock_make_lock(struct file *filp, unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);
	if (type < 0)
		return ERR_PTR(type);
	fl = locks_alloc_lock();
	if (fl == NULL)
		return ERR_PTR(-ENOMEM);
	fl->fl_file = filp;
	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;
	return fl;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 60 | 59.41% | 6 | 54.55% |
| matthew wilcox | 24 | 23.76% | 2 | 18.18% |
| jeff layton | 16 | 15.84% | 2 | 18.18% |
| ingo molnar | 1 | 0.99% | 1 | 9.09% |
| Total | 101 | 100.00% | 11 | 100.00% |
static int assign_type(struct file_lock *fl, long type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 43 | 97.73% | 1 | 50.00% |
| j. bruce fields | 1 | 2.27% | 1 | 50.00% |
| Total | 44 | 100.00% | 2 | 100.00% |
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;
	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + l->l_len - 1;
	} else if (l->l_len < 0) {
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;
	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;
	return assign_type(fl, l->l_type);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 107 | 42.29% | 8 | 47.06% |
| j. bruce fields | 78 | 30.83% | 1 | 5.88% |
| trond myklebust | 40 | 15.81% | 2 | 11.76% |
| andries brouwer | 16 | 6.32% | 1 | 5.88% |
| al viro | 3 | 1.19% | 1 | 5.88% |
| josef sipek | 3 | 1.19% | 1 | 5.88% |
| andrew morton | 3 | 1.19% | 1 | 5.88% |
| linus torvalds | 2 | 0.79% | 1 | 5.88% |
| ingo molnar | 1 | 0.40% | 1 | 5.88% |
| Total | 253 | 100.00% | 17 | 100.00% |
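A few worked examples of the l_len handling above, using hypothetical inputs with l_whence = SEEK_SET: a positive length locks [l_start, l_start + l_len - 1], a negative length locks the l_len bytes before l_start, and zero means "to end of file":
/* Hypothetical inputs and the [fl_start, fl_end] ranges they produce: */
struct flock64 a = { .l_whence = SEEK_SET, .l_start = 100, .l_len = 10 };
	/* -> fl_start = 100, fl_end = 109 */
struct flock64 b = { .l_whence = SEEK_SET, .l_start = 100, .l_len = -10 };
	/* -> fl_start = 90,  fl_end = 99 (the 10 bytes before offset 100) */
struct flock64 c = { .l_whence = SEEK_SET, .l_start = 100, .l_len = 0 };
	/* -> fl_start = 100, fl_end = OFFSET_MAX (lock to end of file) */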
/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
* style lock.
*/
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	struct flock64 ll = {
		.l_type = l->l_type,
		.l_whence = l->l_whence,
		.l_start = l->l_start,
		.l_len = l->l_len,
	};
	return flock64_to_posix_lock(filp, fl, &ll);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 29 | 43.94% | 1 | 25.00% |
| j. bruce fields | 26 | 39.39% | 1 | 25.00% |
| trond myklebust | 7 | 10.61% | 1 | 25.00% |
| namhyung kim | 4 | 6.06% | 1 | 25.00% |
| Total | 66 | 100.00% | 4 | 100.00% |
/* default lease lock manager operations */
static bool
lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
	return false;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| william a. adamson | 22 | 84.62% | 1 | 50.00% |
| jeff layton | 4 | 15.38% | 1 | 50.00% |
| Total | 26 | 100.00% | 2 | 100.00% |
static void
lease_setup(struct file_lock *fl, void **priv)
{
	struct file *filp = fl->fl_file;
	struct fasync_struct *fa = *priv;
	/*
	 * fasync_insert_entry() returns the old entry if any. If there was no
	 * old entry, then it used "priv" and inserted it into the fasync list.
	 * Clear the pointer to indicate that it shouldn't be freed.
	 */
	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
		*priv = NULL;
	__f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jeff layton | 72 | 100.00% | 1 | 100.00% |
| Total | 72 | 100.00% | 1 | 100.00% |
static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
	.lm_setup = lease_setup,
};
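These defaults are what an ordinary userspace lease exercises: lm_setup arranges SIGIO delivery via __f_setown(), and lease_break_callback fires it when a conflicting open arrives. A minimal, hypothetical userspace sketch (the path is illustrative; the caller must own the file or have CAP_LEASE, and F_SETLEASE needs _GNU_SOURCE):
/* Hypothetical userspace demo of a read lease; not part of fs/locks.c. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_lease_break(int sig)
{
	/* The kernel (lm_break -> kill_fasync) is asking us to give up
	 * the lease within lease_break_time seconds. */
}

int main(void)
{
	int fd = open("/tmp/lease-demo", O_RDONLY);	/* illustrative path */

	if (fd < 0)
		return 1;
	signal(SIGIO, on_lease_break);	/* delivered via lm_setup's __f_setown() */
	if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1)
		perror("fcntl(F_SETLEASE)");
	pause();				/* wait for a conflicting open() */
	fcntl(fd, F_SETLEASE, F_UNLCK);		/* surrender the lease */
	close(fd);
	return 0;
}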
/*
* Initialize a lease, use the default lock manager operations
*/
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;
	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 53 | 60.23% | 1 | 14.29% |
| trond myklebust | 23 | 26.14% | 2 | 28.57% |
| william a. adamson | 9 | 10.23% | 1 | 14.29% |
| j. bruce fields | 1 | 1.14% | 1 | 14.29% |
| ingo molnar | 1 | 1.14% | 1 | 14.29% |
| jeff layton | 1 | 1.14% | 1 | 14.29% |
| Total | 88 | 100.00% | 7 | 100.00% |
/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, long type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;
	if (fl == NULL)
		return ERR_PTR(error);
	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| william a. adamson | 41 | 56.16% | 1 | 16.67% |
| j. bruce fields | 17 | 23.29% | 2 | 33.33% |
| trond myklebust | 12 | 16.44% | 2 | 33.33% |
| pre-git | 3 | 4.11% | 1 | 16.67% |
| Total | 73 | 100.00% | 6 | 100.00% |
/* Check if two locks overlap each other. */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 40 | 100.00% | 3 | 100.00% |
| Total | 40 | 100.00% | 3 | 100.00% |
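For example, the ranges [0, 99] and [50, 149] overlap (99 >= 50 and 149 >= 0), while [0, 49] and [50, 99] do not, since fl1->fl_end (49) is below fl2->fl_start (50).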
/*
* Check whether two locks have the same owner.
*/
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| trond myklebust | 30 | 52.63% | 2 | 28.57% |
| pre-git | 24 | 42.11% | 3 | 42.86% |
| j. bruce fields | 2 | 3.51% | 1 | 14.29% |
| matthew wilcox | 1 | 1.75% | 1 | 14.29% |
| Total | 57 | 100.00% | 7 | 100.00% |
/* Must be called with the flc_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
	lg_local_lock(&file_lock_lglock);
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, this_cpu_ptr(&file_lock_list));
	lg_local_unlock(&file_lock_lglock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jeff layton | 44 | 100.00% | 4 | 100.00% |
| Total | 44 | 100.00% | 4 | 100.00% |
/* Must be called with the flc_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
	/*
	 * Avoid taking lock if already unhashed. This is safe since this check
	 * is done while holding the flc_lock, and new insertions into the list
	 * also require that it be held.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;
	lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
	hlist_del_init(&fl->fl_link);
	lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jeff layton | 51 | 100.00% | 5 | 100.00% |
| Total | 51 | 100.00% | 5 | 100.00% |
static unsigned long
posix_owner_key(struct file_lock *fl)
{
	if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
		return fl->fl_lmops->lm_owner_key(fl);
	return (unsigned long)fl->fl_owner;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jeff layton | 43 | 100.00% | 2 | 100.00% |
| Total | 43 | 100.00% | 2 | 100.00% |
static void locks_insert_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);
	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jeff layton | 26 | 81.25% | 3 | 75.00% |
| daniel wagner | 6 | 18.75% | 1 | 25.00% |
| Total | 32 | 100.00% | 4 | 100.00% |
static void locks_delete_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);
	hash_del(&waiter->fl_link);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jeff layton | 19 | 76.00% | 2 | 66.67% |
| daniel wagner | 6 | 24.00% | 1 | 33.33% |
| Total | 25 | 100.00% | 3 | 100.00% |
/* Remove waiter from blocker's block list.
* When blocker ends up pointing to itself then the list is empty.
*
* Must be called with blocked_lock_lock held.
*/
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_block);
	waiter->fl_next = NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 26 | 86.67% | 4 | 57.14% |
| jeff layton | 2 | 6.67% | 1 | 14.29% |
| matthew wilcox | 1 | 3.33% | 1 | 14.29% |
| stephen rothwell | 1 | 3.33% | 1 | 14.29% |
| Total | 30 | 100.00% | 7 | 100.00% |
static void locks_delete_block(struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_delete_block(waiter);
	spin_unlock(&blocked_lock_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| matthew wilcox | 17 | 60.71% | 1 | 25.00% |
| jeff layton | 11 | 39.29% | 3 | 75.00% |
| Total | 28 | 100.00% | 4 | 100.00% |
/* Insert waiter into blocker's block list.
* We use a circular list so that processes can be easily woken up in
* the order they blocked. The documentation doesn't require this but
* it seems like the reasonable thing to do.
*
* Must be called with both the flc_lock and blocked_lock_lock held. The
* fl_block list itself is protected by the blocked_lock_lock, but by ensuring
* that the flc_lock is also held on insertions we can avoid taking the
* blocked_lock_lock in some cases when we see that the fl_block list is empty.
*/
static void __locks_insert_block(struct file_lock *blocker,
				 struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	waiter->fl_next = blocker;
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
		locks_insert_global_blocked(waiter);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 38 | 58.46% | 6 | 50.00% |
| jeff layton | 18 | 27.69% | 4 | 33.33% |
| matthew wilcox | 7 | 10.77% | 1 | 8.33% |
| j. bruce fields | 2 | 3.08% | 1 | 8.33% |
| Total | 65 | 100.00% | 12 | 100.00% |
/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter);
	spin_unlock(&blocked_lock_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jeff layton | 32 | 91.43% | 2 | 50.00% |
| pre-git | 3 | 8.57% | 2 | 50.00% |
| Total | 35 | 100.00% | 4 | 100.00% |
/*
* Wake up processes blocked waiting for blocker.
*
* Must be called with the inode->flc_lock held!
*/
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking global lock if list is empty. This is safe since new
	 * blocked requests are only added to the list under the flc_lock, and
	 * the flc_lock is always held here. Note that removal from the fl_block
	 * list does not require the flc_lock, so we must recheck list_empty()
	 * after acquiring the blocked_lock_lock.
	 */
	if (list_empty(&blocker->fl_block))
		return;
	spin_lock(&blocked_lock_lock);
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;
		waiter = list_first_entry(&blocker->fl_block,
					  struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
	spin_unlock(&blocked_lock_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 46 | 44.66% | 5 | 35.71% |
| jeff layton | 24 | 23.30% | 4 | 28.57% |
| matthew wilcox | 17 | 16.50% | 2 | 14.29% |
| trond myklebust | 8 | 7.77% | 1 | 7.14% |
| pavel emelianov | 6 | 5.83% | 1 | 7.14% |
| j. bruce fields | 2 | 1.94% | 1 | 7.14% |
| Total | 103 | 100.00% | 14 | 100.00% |
static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
	fl->fl_nspid = get_pid(task_tgid(current));
	list_add_tail(&fl->fl_list, before);
	locks_insert_global_locks(fl);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jeff layton | 43 | 100.00% | 1 | 100.00% |
| Total | 43 | 100.00% | 1 | 100.00% |
static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
	locks_delete_global_locks(fl);
	list_del_init(&fl->fl_list);
	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}
	locks_wake_up_blocks(fl);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 20 | 40.00% | 4 | 50.00% |
| vitaliy gusev | 17 | 34.00% | 1 | 12.50% |
| jeff layton | 13 | 26.00% | 3 | 37.50% |
| Total | 50 | 100.00% | 8 | 100.00% |
static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
	locks_unlink_lock_ctx(fl);
	if (dispose)
		list_add(&fl->fl_list, dispose);
	else
		locks_free_lock(fl);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jeff layton | 35 | 85.37% | 4 | 80.00% |
| pre-git | 6 | 14.63% | 1 | 20.00% |
| Total | 41 | 100.00% | 5 | 100.00% |
/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
* checks for shared/exclusive status of overlapping locks.
*/
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return 1;
	if (caller_fl->fl_type == F_WRLCK)
		return 1;
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 31 | 75.61% | 2 | 66.67% |
| matthew wilcox | 10 | 24.39% | 1 | 33.33% |
| Total | 41 | 100.00% | 3 | 100.00% |
/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
* checking before calling the locks_conflict().
*/
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (posix_same_owner(caller_fl, sys_fl))
		return (0);
	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;
	return (locks_conflict(caller_fl, sys_fl));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 54 | 98.18% | 5 | 83.33% |
| matthew wilcox | 1 | 1.82% | 1 | 16.67% |
| Total | 55 | 100.00% | 6 | 100.00% |
/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
* checking before calling the locks_conflict().
*/
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (caller_fl->fl_file == sys_fl->fl_file)
		return (0);
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;
	return (locks_conflict(caller_fl, sys_fl));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 63 | 100.00% | 5 | 100.00% |
| Total | 63 | 100.00% | 5 | 100.00% |
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct file_lock_context *ctx;
	struct inode *inode = file_inode(filp);
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
		fl->fl_type = F_UNLCK;
		return;
	}
	spin_lock(&ctx->flc_lock);
	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
		if (posix_locks_conflict(fl, cfl)) {
			locks_copy_conflock(fl, cfl);
			if (cfl->fl_nspid)
				fl->fl_pid = pid_vnr(cfl->fl_nspid);
			goto out;
		}
	}
	fl->fl_type = F_UNLCK;
out:
	spin_unlock(