Release 4.7 fs/locks.c

/*
 *  linux/fs/locks.c
 *
 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 *  Deadlock detection added.
 *  FIXME: one thing isn't handled yet:
 *      - mandatory locks (requires lots of changes elsewhere)
 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *  
 *  Converted file_lock_table to a linked list from an array, which eliminates
 *  the limits on how many active file locks are open.
 *  Chad Page (pageone@netcom.com), November 27, 1994
 * 
 *  Removed dependency on file descriptors. dup()'ed file descriptors now
 *  get the same locks as the original file descriptors, and a close() on
 *  any file descriptor removes ALL the locks on the file for the current
 *  process. Since locks still depend on the process id, locks are inherited
 *  after an exec() but not after a fork(). This agrees with POSIX, and both
 *  BSD and SVR4 practice.
 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 *  Scrapped free list which is redundant now that we allocate locks
 *  dynamically with kmalloc()/kfree().
 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
 *  fcntl() system call. They have the semantics described above.
 *
 *  FL_FLOCK locks are created with calls to flock(), through the flock()
 *  system call, which is new. Old C libraries implement flock() via fcntl()
 *  and will continue to use the old, broken implementation.
 *
 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 *  with a file pointer (filp). As a result they can be shared by a parent
 *  process and its children after a fork(). They are removed when the last
 *  file descriptor referring to the file pointer is closed (unless explicitly
 *  unlocked). 
 *
 *  FL_FLOCK locks never deadlock, an existing lock is always removed before
 *  upgrading from shared to exclusive (or vice versa). When this happens
 *  any processes blocked by the current lock are woken up and allowed to
 *  run before the new lock is applied.
 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 *  Removed some race conditions in flock_lock_file(), marked other possible
 *  races. Just grep for FIXME to see them. 
 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 *  once we've checked for blocking and deadlocking.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 *  Initial implementation of mandatory locks. SunOS turned out to be
 *  a rotten model, so I implemented the "obvious" semantics.
 *  See 'Documentation/filesystems/mandatory-locking.txt' for details.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
 *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 *  Manual, Section 2.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 *  Tidied up block list handling. Added '/proc/locks' interface.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 *  Fixed deadlock condition for pathological code that mixes calls to
 *  flock() and fcntl().
 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 *  guarantee sensible behaviour in the case where file system modules might
 *  be compiled with different options than the kernel itself.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 *  locks. Changed process synchronisation to avoid dereferencing locks that
 *  have already been freed.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 *  Made the block list a circular list to minimise searching in the list.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 *  Made mandatory locking a mount option. Default is not to allow mandatory
 *  locking.
 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 *  Some adaptations for NFS support.
 *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
 *
 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 *  Use slab allocator instead of kmalloc/kfree.
 *  Use generic list implementation from <linux/list.h>.
 *  Sped up posix_locks_deadlock by only considering blocked locks.
 *  Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 *  Leases and LOCK_MAND
 *  Matthew Wilcox <willy@debian.org>, June, 2000.
 *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 */
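
The FL_POSIX/FL_FLOCK split described above corresponds to the two userspace locking APIs. A minimal userspace sketch (not part of this file; the path and error handling are illustrative assumptions) exercising both:

#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/tmp/example", O_RDWR | O_CREAT, 0600);  /* path assumed */
        if (fd < 0)
                return 1;

        /* FL_POSIX: byte-range lock on the whole file via fcntl() */
        struct flock fl = {
                .l_type   = F_WRLCK,
                .l_whence = SEEK_SET,
                .l_start  = 0,
                .l_len    = 0,          /* 0 means "to end of file" */
        };
        if (fcntl(fd, F_SETLK, &fl) == -1)
                perror("fcntl(F_SETLK)");

        /* FL_FLOCK: whole-file lock tied to the open file description */
        if (flock(fd, LOCK_EX | LOCK_NB) == -1)
                perror("flock(LOCK_EX)");

        close(fd);      /* closing the descriptor drops both kinds of locks */
        return 0;
}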

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>
#include <linux/lglock.h>


#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

#include <asm/uaccess.h>


#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)

#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)

#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))

#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)


static bool lease_breaking(struct file_lock *fl)
{
        return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
j. bruce fields      22      100.00%  3        100.00%
Total                22      100.00%  3        100.00%


static int target_leasetype(struct file_lock *fl)
{
        if (fl->fl_flags & FL_UNLOCK_PENDING)
                return F_UNLCK;
        if (fl->fl_flags & FL_DOWNGRADE_PENDING)
                return F_RDLCK;
        return fl->fl_type;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
j. bruce fields      38      100.00%  2        100.00%
Total                38      100.00%  2        100.00%

int leases_enable = 1;
int lease_break_time = 45;

/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock via
 * the file_lock_lglock. Note that alterations to the list also require that
 * the relevant flc_lock is held.
 */
DEFINE_STATIC_LGLOCK(file_lock_lglock);
static DEFINE_PER_CPU(struct hlist_head, file_lock_list);

/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * It is protected by blocked_lock_lock.
 *
 * We hash locks by lockowner in order to optimize searching for the lock a
 * particular lockowner is waiting on.
 *
 * FIXME: make this value scale via some heuristic? We generally will want more
 * buckets when we have more lockowners holding locks, but that's a little
 * difficult to determine without knowing what the workload will look like.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);

/*
 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 * want to be holding this lock.
 *
 * In addition, it also protects the fl->fl_block list, and the fl->fl_next
 * pointer for file_lock structures that are acting as lock requests (in
 * contrast to those that are acting as records of acquired locks).
 *
 * Note that when we acquire this lock in order to change the above fields,
 * we often hold the flc_lock as well. In certain cases, when reading the fields
 * protected by this lock, we can skip acquiring it iff we already hold the
 * flc_lock.
 *
 * In particular, adding an entry to the fl_block list requires that you hold
 * both the flc_lock and the blocked_lock_lock (acquired in that order).
 * Deleting an entry from the list however only requires the file_lock_lock.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);

static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;
static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
        struct file_lock_context *ctx;

        /* paired with cmpxchg() below */
        ctx = smp_load_acquire(&inode->i_flctx);
        if (likely(ctx) || type == F_UNLCK)
                goto out;

        ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
        if (!ctx)
                goto out;

        spin_lock_init(&ctx->flc_lock);
        INIT_LIST_HEAD(&ctx->flc_flock);
        INIT_LIST_HEAD(&ctx->flc_posix);
        INIT_LIST_HEAD(&ctx->flc_lease);

        /*
         * Assign the pointer if it's not already assigned. If it is, then
         * free the context we just allocated.
         */
        if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
                kmem_cache_free(flctx_cache, ctx);
                ctx = smp_load_acquire(&inode->i_flctx);
        }
out:
        trace_locks_get_lock_context(inode, type, ctx);
        return ctx;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
jeff layton          109     76.22%   7        87.50%
dmitriy vyukov       34      23.78%   1        12.50%
Total                143     100.00%  8        100.00%
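
The smp_load_acquire()/cmpxchg() pairing above is a lockless lazy-initialization pattern: whoever loses the race frees its own allocation and adopts the published context. A standalone userspace analog (my own sketch, using C11 atomics instead of the kernel primitives; the names are made up for illustration):

#include <stdatomic.h>
#include <stdlib.h>

struct ctx { int dummy; };

static _Atomic(struct ctx *) shared_ctx;

/* Lazily publish a context; only one allocation survives a race. */
static struct ctx *get_ctx(void)
{
        struct ctx *ctx = atomic_load_explicit(&shared_ctx, memory_order_acquire);
        if (ctx)
                return ctx;

        ctx = calloc(1, sizeof(*ctx));
        if (!ctx)
                return NULL;

        struct ctx *expected = NULL;
        if (!atomic_compare_exchange_strong_explicit(&shared_ctx, &expected, ctx,
                                                     memory_order_acq_rel,
                                                     memory_order_acquire)) {
                free(ctx);              /* somebody else won the race */
                ctx = expected;         /* use the pointer they published */
        }
        return ctx;
}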


static void
locks_dump_ctx_list(struct list_head *list, char *list_type)
{
        struct file_lock *fl;

        list_for_each_entry(fl, list, fl_list) {
                pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
                        list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
        }
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
jeff layton          53      100.00%  2        100.00%
Total                53      100.00%  2        100.00%


static void
locks_check_ctx_lists(struct inode *inode)
{
        struct file_lock_context *ctx = inode->i_flctx;

        if (unlikely(!list_empty(&ctx->flc_flock) ||
                     !list_empty(&ctx->flc_posix) ||
                     !list_empty(&ctx->flc_lease))) {
                pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
                        MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
                        inode->i_ino);
                locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
                locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
                locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
        }
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
jeff layton          111     100.00%  5        100.00%
Total                111     100.00%  5        100.00%


void
locks_free_lock_context(struct inode *inode)
{
        struct file_lock_context *ctx = inode->i_flctx;

        if (unlikely(ctx)) {
                locks_check_ctx_lists(inode);
                kmem_cache_free(flctx_cache, ctx);
        }
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
jeff layton          40      100.00%  3        100.00%
Total                40      100.00%  3        100.00%


static void locks_init_lock_heads(struct file_lock *fl)
{
        INIT_HLIST_NODE(&fl->fl_link);
        INIT_LIST_HEAD(&fl->fl_list);
        INIT_LIST_HEAD(&fl->fl_block);
        init_waitqueue_head(&fl->fl_wait);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
miklos szeredi       34      79.07%   2        50.00%
jeff layton          9       20.93%   2        50.00%
Total                43      100.00%  4        100.00%

/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
        struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

        if (fl)
                locks_init_lock_heads(fl);

        return fl;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
miklos szeredi       18      54.55%   2        28.57%
pre-git              13      39.39%   3        42.86%
matthew wilcox       1       3.03%    1        14.29%
christoph lameter    1       3.03%    1        14.29%
Total                33      100.00%  7        100.00%

EXPORT_SYMBOL_GPL(locks_alloc_lock);
void locks_release_private(struct file_lock *fl)
{
        if (fl->fl_ops) {
                if (fl->fl_ops->fl_release_private)
                        fl->fl_ops->fl_release_private(fl);
                fl->fl_ops = NULL;
        }

        if (fl->fl_lmops) {
                if (fl->fl_lmops->lm_put_owner) {
                        fl->fl_lmops->lm_put_owner(fl->fl_owner);
                        fl->fl_owner = NULL;
                }
                fl->fl_lmops = NULL;
        }
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
trond myklebust      47      57.32%   1        33.33%
kinglong mee         25      30.49%   1        33.33%
jeff layton          10      12.20%   1        33.33%
Total                82      100.00%  3        100.00%

EXPORT_SYMBOL_GPL(locks_release_private);

/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
        BUG_ON(waitqueue_active(&fl->fl_wait));
        BUG_ON(!list_empty(&fl->fl_list));
        BUG_ON(!list_empty(&fl->fl_block));
        BUG_ON(!hlist_unhashed(&fl->fl_link));

        locks_release_private(fl);
        kmem_cache_free(filelock_cache, fl);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
pre-git              45      65.22%   2        28.57%
jeff layton          13      18.84%   2        28.57%
miklos szeredi       6       8.70%    1        14.29%
william a. adamson   4       5.80%    1        14.29%
trond myklebust      1       1.45%    1        14.29%
Total                69      100.00%  7        100.00%

EXPORT_SYMBOL(locks_free_lock);
static void
locks_dispose_list(struct list_head *dispose)
{
        struct file_lock *fl;

        while (!list_empty(dispose)) {
                fl = list_first_entry(dispose, struct file_lock, fl_list);
                list_del_init(&fl->fl_list);
                locks_free_lock(fl);
        }
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
jeff layton          51      100.00%  2        100.00%
Total                51      100.00%  2        100.00%


void locks_init_lock(struct file_lock *fl)
{
        memset(fl, 0, sizeof(struct file_lock));
        locks_init_lock_heads(fl);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
pre-git              16      57.14%   1        33.33%
miklos szeredi       12      42.86%   2        66.67%
Total                28      100.00%  3        100.00%

EXPORT_SYMBOL(locks_init_lock);

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
        new->fl_owner = fl->fl_owner;
        new->fl_pid = fl->fl_pid;
        new->fl_file = NULL;
        new->fl_flags = fl->fl_flags;
        new->fl_type = fl->fl_type;
        new->fl_start = fl->fl_start;
        new->fl_end = fl->fl_end;
        new->fl_lmops = fl->fl_lmops;
        new->fl_ops = NULL;

        if (fl->fl_lmops) {
                if (fl->fl_lmops->lm_get_owner)
                        fl->fl_lmops->lm_get_owner(fl->fl_owner);
        }
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
pre-git              69      62.73%   12       75.00%
kinglong mee         31      28.18%   2        12.50%
trond myklebust      8       7.27%    1        6.25%
jeff layton          2       1.82%    1        6.25%
Total                110     100.00%  16       100.00%

EXPORT_SYMBOL(locks_copy_conflock);
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
        /* "new" must be a freshly-initialized lock */
        WARN_ON_ONCE(new->fl_ops);

        locks_copy_conflock(new, fl);

        new->fl_file = fl->fl_file;
        new->fl_ops = fl->fl_ops;

        if (fl->fl_ops) {
                if (fl->fl_ops->fl_copy_lock)
                        fl->fl_ops->fl_copy_lock(new, fl);
        }
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
trond myklebust      41      56.16%   2        25.00%
kinglong mee         19      26.03%   2        25.00%
william a. adamson   6       8.22%    1        12.50%
jeff layton          4       5.48%    1        12.50%
pre-git              3       4.11%    2        25.00%
Total                73      100.00%  8        100.00%

EXPORT_SYMBOL(locks_copy_lock);
static inline int flock_translate_cmd(int cmd)
{
        if (cmd & LOCK_MAND)
                return cmd & (LOCK_MAND | LOCK_RW);
        switch (cmd) {
        case LOCK_SH:
                return F_RDLCK;
        case LOCK_EX:
                return F_WRLCK;
        case LOCK_UN:
                return F_UNLCK;
        }
        return -EINVAL;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
matthew wilcox       53      100.00%  1        100.00%
Total                53      100.00%  1        100.00%

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static struct file_lock *
flock_make_lock(struct file *filp, unsigned int cmd)
{
        struct file_lock *fl;
        int type = flock_translate_cmd(cmd);

        if (type < 0)
                return ERR_PTR(type);

        fl = locks_alloc_lock();
        if (fl == NULL)
                return ERR_PTR(-ENOMEM);

        fl->fl_file = filp;
        fl->fl_owner = filp;
        fl->fl_pid = current->tgid;
        fl->fl_flags = FL_FLOCK;
        fl->fl_type = type;
        fl->fl_end = OFFSET_MAX;

        return fl;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
pre-git              60      59.41%   6        54.55%
matthew wilcox       24      23.76%   2        18.18%
jeff layton          16      15.84%   2        18.18%
ingo molnar          1       0.99%    1        9.09%
Total                101     100.00%  11       100.00%


static int assign_type(struct file_lock *fl, long type)
{
        switch (type) {
        case F_RDLCK:
        case F_WRLCK:
        case F_UNLCK:
                fl->fl_type = type;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
pre-git              43      97.73%   1        50.00%
j. bruce fields      1       2.27%    1        50.00%
Total                44      100.00%  2        100.00%


static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
                                 struct flock64 *l)
{
        switch (l->l_whence) {
        case SEEK_SET:
                fl->fl_start = 0;
                break;
        case SEEK_CUR:
                fl->fl_start = filp->f_pos;
                break;
        case SEEK_END:
                fl->fl_start = i_size_read(file_inode(filp));
                break;
        default:
                return -EINVAL;
        }
        if (l->l_start > OFFSET_MAX - fl->fl_start)
                return -EOVERFLOW;
        fl->fl_start += l->l_start;
        if (fl->fl_start < 0)
                return -EINVAL;

        /* POSIX-1996 leaves the case l->l_len < 0 undefined;
           POSIX-2001 defines it. */
        if (l->l_len > 0) {
                if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
                        return -EOVERFLOW;
                fl->fl_end = fl->fl_start + l->l_len - 1;
        } else if (l->l_len < 0) {
                if (fl->fl_start + l->l_len < 0)
                        return -EINVAL;
                fl->fl_end = fl->fl_start - 1;
                fl->fl_start += l->l_len;
        } else
                fl->fl_end = OFFSET_MAX;

        fl->fl_owner = current->files;
        fl->fl_pid = current->tgid;
        fl->fl_file = filp;
        fl->fl_flags = FL_POSIX;
        fl->fl_ops = NULL;
        fl->fl_lmops = NULL;

        return assign_type(fl, l->l_type);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
pre-git              107     42.29%   8        47.06%
j. bruce fields      78      30.83%   1        5.88%
trond myklebust      40      15.81%   2        11.76%
andries brouwer      16      6.32%    1        5.88%
al viro              3       1.19%    1        5.88%
josef sipek          3       1.19%    1        5.88%
andrew morton        3       1.19%    1        5.88%
linus torvalds       2       0.79%    1        5.88%
ingo molnar          1       0.40%    1        5.88%
Total                253     100.00%  17       100.00%

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX * style lock. */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl, struct flock *l) { struct flock64 ll = { .l_type = l->l_type, .l_whence = l->l_whence, .l_start = l->l_start, .l_len = l->l_len, }; return flock64_to_posix_lock(filp, fl, &ll); }

Contributors

PersonTokensPropCommitsCommitProp
pre-gitpre-git2943.94%125.00%
j. bruce fieldsj. bruce fields2639.39%125.00%
trond myklebusttrond myklebust710.61%125.00%
namhyung kimnamhyung kim46.06%125.00%
Total66100.00%4100.00%
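
The l_whence/l_start/l_len arithmetic above, including the POSIX-2001 negative l_len case, is what userspace supplies through struct flock. A userspace sketch (not part of this file; it assumes an open, writable fd whose current offset is at least 50) showing a forward range and a negative-length range:

#include <fcntl.h>

/* Assumes fd refers to an open, writable file with offset >= 50. */
static int lock_examples(int fd)
{
        /* Bytes 100..199: l_len > 0 counts forward from l_start. */
        struct flock range = {
                .l_type   = F_WRLCK,
                .l_whence = SEEK_SET,
                .l_start  = 100,
                .l_len    = 100,
        };
        if (fcntl(fd, F_SETLK, &range) == -1)
                return -1;

        /* With l_len < 0 (POSIX-2001), the range is the |l_len| bytes
         * ending just before the position given by l_whence/l_start. */
        struct flock tail = {
                .l_type   = F_RDLCK,
                .l_whence = SEEK_CUR,
                .l_start  = 0,
                .l_len    = -50,
        };
        return fcntl(fd, F_SETLK, &tail);
}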

/* default lease lock manager operations */
static bool lease_break_callback(struct file_lock *fl)
{
        kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
        return false;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
william a. adamson   22      84.62%   1        50.00%
jeff layton          4       15.38%   1        50.00%
Total                26      100.00%  2        100.00%


static void
lease_setup(struct file_lock *fl, void **priv)
{
        struct file *filp = fl->fl_file;
        struct fasync_struct *fa = *priv;

        /*
         * fasync_insert_entry() returns the old entry if any. If there was no
         * old entry, then it used "priv" and inserted it into the fasync list.
         * Clear the pointer to indicate that it shouldn't be freed.
         */
        if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
                *priv = NULL;

        __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
jeff layton          72      100.00%  1        100.00%
Total                72      100.00%  1        100.00%

static const struct lock_manager_operations lease_manager_ops = {
        .lm_break = lease_break_callback,
        .lm_change = lease_modify,
        .lm_setup = lease_setup,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
        if (assign_type(fl, type) != 0)
                return -EINVAL;

        fl->fl_owner = filp;
        fl->fl_pid = current->tgid;

        fl->fl_file = filp;
        fl->fl_flags = FL_LEASE;
        fl->fl_start = 0;
        fl->fl_end = OFFSET_MAX;
        fl->fl_ops = NULL;
        fl->fl_lmops = &lease_manager_ops;
        return 0;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
pre-git              53      60.23%   1        14.29%
trond myklebust      23      26.14%   2        28.57%
william a. adamson   9       10.23%   1        14.29%
j. bruce fields      1       1.14%    1        14.29%
ingo molnar          1       1.14%    1        14.29%
jeff layton          1       1.14%    1        14.29%
Total                88      100.00%  7        100.00%

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, long type)
{
        struct file_lock *fl = locks_alloc_lock();
        int error = -ENOMEM;

        if (fl == NULL)
                return ERR_PTR(error);

        error = lease_init(filp, type, fl);
        if (error) {
                locks_free_lock(fl);
                return ERR_PTR(error);
        }
        return fl;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
william a. adamson   41      56.16%   1        16.67%
j. bruce fields      17      23.29%   2        33.33%
trond myklebust      12      16.44%   2        33.33%
pre-git              3       4.11%    1        16.67%
Total                73      100.00%  6        100.00%
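
Leases built by lease_init()/lease_alloc() reach userspace through fcntl(F_SETLEASE), and lease_break_callback() above delivers the break as SIGIO via the fasync machinery. A userspace sketch (not part of this file; the path is an assumption, and taking a lease requires owning the file or CAP_LEASE) that takes and later releases a read lease:

#define _GNU_SOURCE             /* for F_SETLEASE */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_lease_break(int sig)
{
        /* A real handler would finish its work, then drop the lease. */
        (void)sig;
}

int main(void)
{
        int fd = open("/tmp/example", O_RDONLY);        /* path is illustrative */
        if (fd < 0)
                return 1;

        signal(SIGIO, on_lease_break);          /* lease breaks arrive as SIGIO */

        if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1)       /* take a read lease */
                perror("F_SETLEASE");

        pause();                                /* wait for a break notification */
        fcntl(fd, F_SETLEASE, F_UNLCK);         /* release the lease */
        close(fd);
        return 0;
}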

/* Check if two locks overlap each other. */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
        return ((fl1->fl_end >= fl2->fl_start) &&
                (fl2->fl_end >= fl1->fl_start));
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
pre-git              40      100.00%  3        100.00%
Total                40      100.00%  3        100.00%

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
        if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
                return fl2->fl_lmops == fl1->fl_lmops &&
                        fl1->fl_lmops->lm_compare_owner(fl1, fl2);
        return fl1->fl_owner == fl2->fl_owner;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
trond myklebust      30      52.63%   2        28.57%
pre-git              24      42.11%   3        42.86%
j. bruce fields      2       3.51%    1        14.29%
matthew wilcox       1       1.75%    1        14.29%
Total                57      100.00%  7        100.00%

/* Must be called with the flc_lock held! */
static void
locks_insert_global_locks(struct file_lock *fl)
{
        lg_local_lock(&file_lock_lglock);
        fl->fl_link_cpu = smp_processor_id();
        hlist_add_head(&fl->fl_link, this_cpu_ptr(&file_lock_list));
        lg_local_unlock(&file_lock_lglock);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
jeff layton          44      100.00%  4        100.00%
Total                44      100.00%  4        100.00%

/* Must be called with the flc_lock held! */
static void
locks_delete_global_locks(struct file_lock *fl)
{
        /*
         * Avoid taking lock if already unhashed. This is safe since this check
         * is done while holding the flc_lock, and new insertions into the list
         * also require that it be held.
         */
        if (hlist_unhashed(&fl->fl_link))
                return;
        lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
        hlist_del_init(&fl->fl_link);
        lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
jeff layton          51      100.00%  5        100.00%
Total                51      100.00%  5        100.00%


static unsigned long
posix_owner_key(struct file_lock *fl)
{
        if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
                return fl->fl_lmops->lm_owner_key(fl);
        return (unsigned long)fl->fl_owner;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
jeff layton          43      100.00%  2        100.00%
Total                43      100.00%  2        100.00%


static void locks_insert_global_blocked(struct file_lock *waiter)
{
        lockdep_assert_held(&blocked_lock_lock);

        hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
jeff layton          26      81.25%   3        75.00%
daniel wagner        6       18.75%   1        25.00%
Total                32      100.00%  4        100.00%


static void locks_delete_global_blocked(struct file_lock *waiter)
{
        lockdep_assert_held(&blocked_lock_lock);

        hash_del(&waiter->fl_link);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
jeff layton          19      76.00%   2        66.67%
daniel wagner        6       24.00%   1        33.33%
Total                25      100.00%  3        100.00%

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 *
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
        locks_delete_global_blocked(waiter);
        list_del_init(&waiter->fl_block);
        waiter->fl_next = NULL;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
pre-git              26      86.67%   4        57.14%
jeff layton          2       6.67%    1        14.29%
matthew wilcox       1       3.33%    1        14.29%
stephen rothwell     1       3.33%    1        14.29%
Total                30      100.00%  7        100.00%


static void locks_delete_block(struct file_lock *waiter)
{
        spin_lock(&blocked_lock_lock);
        __locks_delete_block(waiter);
        spin_unlock(&blocked_lock_lock);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
matthew wilcox       17      60.71%   1        25.00%
jeff layton          11      39.29%   3        75.00%
Total                28      100.00%  4        100.00%

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 *
 * Must be called with both the flc_lock and blocked_lock_lock held. The
 * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
 * that the flc_lock is also held on insertions we can avoid taking the
 * blocked_lock_lock in some cases when we see that the fl_block list is empty.
 */
static void __locks_insert_block(struct file_lock *blocker,
                                 struct file_lock *waiter)
{
        BUG_ON(!list_empty(&waiter->fl_block));
        waiter->fl_next = blocker;
        list_add_tail(&waiter->fl_block, &blocker->fl_block);
        if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
                locks_insert_global_blocked(waiter);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
pre-git              38      58.46%   6        50.00%
jeff layton          18      27.69%   4        33.33%
matthew wilcox       7       10.77%   1        8.33%
j. bruce fields      2       3.08%    1        8.33%
Total                65      100.00%  12       100.00%

/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
                               struct file_lock *waiter)
{
        spin_lock(&blocked_lock_lock);
        __locks_insert_block(blocker, waiter);
        spin_unlock(&blocked_lock_lock);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
jeff layton          32      91.43%   2        50.00%
pre-git              3       8.57%    2        50.00%
Total                35      100.00%  4        100.00%

/*
 * Wake up processes blocked waiting for blocker.
 *
 * Must be called with the inode->flc_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
        /*
         * Avoid taking global lock if list is empty. This is safe since new
         * blocked requests are only added to the list under the flc_lock, and
         * the flc_lock is always held here. Note that removal from the fl_block
         * list does not require the flc_lock, so we must recheck list_empty()
         * after acquiring the blocked_lock_lock.
         */
        if (list_empty(&blocker->fl_block))
                return;

        spin_lock(&blocked_lock_lock);
        while (!list_empty(&blocker->fl_block)) {
                struct file_lock *waiter;

                waiter = list_first_entry(&blocker->fl_block,
                                struct file_lock, fl_block);
                __locks_delete_block(waiter);
                if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
                        waiter->fl_lmops->lm_notify(waiter);
                else
                        wake_up(&waiter->fl_wait);
        }
        spin_unlock(&blocked_lock_lock);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
pre-git              46      44.66%   5        35.71%
jeff layton          24      23.30%   4        28.57%
matthew wilcox       17      16.50%   2        14.29%
trond myklebust      8       7.77%    1        7.14%
pavel emelianov      6       5.83%    1        7.14%
j. bruce fields      2       1.94%    1        7.14%
Total                103     100.00%  14       100.00%


static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
        fl->fl_nspid = get_pid(task_tgid(current));
        list_add_tail(&fl->fl_list, before);
        locks_insert_global_locks(fl);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
jeff layton          43      100.00%  1        100.00%
Total                43      100.00%  1        100.00%


static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
        locks_delete_global_locks(fl);
        list_del_init(&fl->fl_list);
        if (fl->fl_nspid) {
                put_pid(fl->fl_nspid);
                fl->fl_nspid = NULL;
        }
        locks_wake_up_blocks(fl);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
pre-git              20      40.00%   4        50.00%
vitaliy gusev        17      34.00%   1        12.50%
jeff layton          13      26.00%   3        37.50%
Total                50      100.00%  8        100.00%


static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
        locks_unlink_lock_ctx(fl);
        if (dispose)
                list_add(&fl->fl_list, dispose);
        else
                locks_free_lock(fl);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
jeff layton          35      85.37%   4        80.00%
pre-git              6       14.63%   1        20.00%
Total                41      100.00%  5        100.00%

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
        if (sys_fl->fl_type == F_WRLCK)
                return 1;
        if (caller_fl->fl_type == F_WRLCK)
                return 1;
        return 0;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
pre-git              31      75.61%   2        66.67%
matthew wilcox       10      24.39%   1        33.33%
Total                41      100.00%  3        100.00%

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
        /* POSIX locks owned by the same process do not conflict with
         * each other.
         */
        if (posix_same_owner(caller_fl, sys_fl))
                return (0);

        /* Check whether they overlap */
        if (!locks_overlap(caller_fl, sys_fl))
                return 0;

        return (locks_conflict(caller_fl, sys_fl));
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
pre-git              54      98.18%   5        83.33%
matthew wilcox       1       1.82%    1        16.67%
Total                55      100.00%  6        100.00%

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
        /* FLOCK locks referring to the same filp do not conflict with
         * each other.
         */
        if (caller_fl->fl_file == sys_fl->fl_file)
                return (0);
        if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
                return 0;

        return (locks_conflict(caller_fl, sys_fl));
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
pre-git              63      100.00%  5        100.00%
Total                63      100.00%  5        100.00%
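
Because flock_locks_conflict() compares fl_file pointers, locks taken through the same open file description never conflict with each other, while a separate open() of the same file does. A userspace sketch (not part of this file; the path is an assumption) that makes the distinction visible:

#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
        int fd1 = open("/tmp/example", O_RDWR | O_CREAT, 0600); /* path assumed */
        int fd2 = dup(fd1);                  /* same open file description (same filp) */
        int fd3 = open("/tmp/example", O_RDWR); /* separate open file description */

        if (fd1 < 0 || fd2 < 0 || fd3 < 0)
                return 1;

        flock(fd1, LOCK_EX);
        /* Same filp: does not conflict with the lock taken through fd1. */
        printf("dup'd fd: %s\n", flock(fd2, LOCK_EX | LOCK_NB) ? "conflict" : "no conflict");
        /* Different filp: conflicts, so the non-blocking request fails. */
        printf("new open: %s\n", flock(fd3, LOCK_EX | LOCK_NB) ? "conflict" : "no conflict");
        return 0;
}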


void posix_test_lock(struct file *filp, struct file_lock *fl)
{
        struct file_lock *cfl;
        struct file_lock_context *ctx;
        struct inode *inode = file_inode(filp);

        ctx = smp_load_acquire(&inode->i_flctx);
        if (!ctx || list_empty_careful(&ctx->flc_posix)) {
                fl->fl_type = F_UNLCK;
                return;
        }

        spin_lock(&ctx->flc_lock);
        list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
                if (posix_locks_conflict(fl, cfl)) {
                        locks_copy_conflock(fl, cfl);
                        if (cfl->fl_nspid)
                                fl->fl_pid = pid_vnr(cfl->fl_nspid);
                        goto out;
                }
        }
        fl->fl_type = F_UNLCK;
out:
        spin_unlock(&ctx->flc_lock);
}