cregit-Linux how code gets into the kernel

Release 4.10 fs/lockd/clntlock.c

Directory: fs/lockd
/*
 * linux/fs/lockd/clntlock.c
 *
 * Lock handling for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/nfs_fs.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/kthread.h>


/*
 * Local function prototypes
 */
static int			reclaimer(void *ptr);

/*
 * The following functions handle blocking and granting from the
 * client perspective.
 */

 * This is the representation of a blocked client lock.

struct nlm_wait {
struct list_head	b_list;		/* linked list */
wait_queue_head_t	b_wait;		/* where to wait on */
struct nlm_host *	b_host;
struct file_lock *	b_lock;		/* local file lock */
unsigned short		b_reclaim;	/* got to reclaim lock */
__be32			b_status;	/* grant callback status */

static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);

 * nlmclnt_init - Set up per-NFS mount point lockd data structures
 * @nlm_init: pointer to arguments structure
 * Returns pointer to an appropriate nlm_host struct,
 * or an ERR_PTR value.

struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init) { struct nlm_host *host; u32 nlm_version = (nlm_init->nfs_version == 2) ? 1 : 4; int status; status = lockd_up(nlm_init->net); if (status < 0) return ERR_PTR(status); host = nlmclnt_lookup_host(nlm_init->address, nlm_init->addrlen, nlm_init->protocol, nlm_version, nlm_init->hostname, nlm_init->noresvport, nlm_init->net); if (host == NULL) goto out_nohost; if (host->h_rpcclnt == NULL && nlm_bind_host(host) == NULL) goto out_nobind; return host; out_nobind: nlmclnt_release_host(host); out_nohost: lockd_down(nlm_init->net); return ERR_PTR(-ENOLCK); }


chuck leverchuck lever9466.67%457.14%
trond myklebusttrond myklebust3323.40%114.29%
stanislav kinsburskystanislav kinsbursky149.93%228.57%

EXPORT_SYMBOL_GPL(nlmclnt_init); /** * nlmclnt_done - Release resources allocated by nlmclnt_init() * @host: nlm_host structure reserved by nlmclnt_init() * */
void nlmclnt_done(struct nlm_host *host) { struct net *net = host->net; nlmclnt_release_host(host); lockd_down(net); }


chuck leverchuck lever1758.62%266.67%
stanislav kinsburskystanislav kinsbursky1241.38%133.33%

EXPORT_SYMBOL_GPL(nlmclnt_done); /* * Queue up a lock for blocking so that the GRANTED request can see it */
struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl) { struct nlm_wait *block; block = kmalloc(sizeof(*block), GFP_KERNEL); if (block != NULL) { block->b_host = host; block->b_lock = fl; init_waitqueue_head(&block->b_wait); block->b_status = nlm_lck_blocked; spin_lock(&nlm_blocked_lock); list_add(&block->b_list, &nlm_blocked); spin_unlock(&nlm_blocked_lock); } return block; }


trond myklebusttrond myklebust4345.26%342.86%
bryan schumakerbryan schumaker1212.63%114.29%
al viroal viro11.05%114.29%

void nlmclnt_finish_block(struct nlm_wait *block) { if (block == NULL) return; spin_lock(&nlm_blocked_lock); list_del(&block->b_list); spin_unlock(&nlm_blocked_lock); kfree(block); }


trond myklebusttrond myklebust2354.76%360.00%
bryan schumakerbryan schumaker1228.57%120.00%

/* * Block on a lock */
int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout) { long ret; /* A borken server might ask us to block even if we didn't * request it. Just say no! */ if (block == NULL) return -EAGAIN; /* Go to sleep waiting for GRANT callback. Some servers seem * to lose callbacks, however, so we're going to poll from * time to time just to make sure. * * For now, the retry frequency is pretty high; normally * a 1 minute timeout would do. See the comment before * nlmclnt_lock for an explanation. */ ret = wait_event_interruptible_timeout(block->b_wait, block->b_status != nlm_lck_blocked, timeout); if (ret < 0) return -ERESTARTSYS; /* Reset the lock status after a server reboot so we resend */ if (block->b_status == nlm_lck_denied_grace_period) block->b_status = nlm_lck_blocked; req->a_res.status = block->b_status; return 0; }


trond myklebusttrond myklebust7484.09%360.00%
al viroal viro11.14%120.00%

/* * The server lockd has called us back to tell us the lock was granted */
__be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock) { const struct file_lock *fl = &lock->fl; const struct nfs_fh *fh = &lock->fh; struct nlm_wait *block; __be32 res = nlm_lck_denied; /* * Look up blocked request based on arguments. * Warning: must not use cookie to match it! */ spin_lock(&nlm_blocked_lock); list_for_each_entry(block, &nlm_blocked, b_list) { struct file_lock *fl_blocked = block->b_lock; if (fl_blocked->fl_start != fl->fl_start) continue; if (fl_blocked->fl_end != fl->fl_end) continue; /* * Careful! The NLM server will return the 32-bit "pid" that * we put on the wire: in this case the lockowner "pid". */ if (fl_blocked->fl_u.nfs_fl.owner->pid != lock->svid) continue; if (!rpc_cmp_addr(nlm_addr(block->b_host), addr)) continue; if (nfs_compare_fh(NFS_FH(file_inode(fl_blocked->fl_file)) ,fh) != 0) continue; /* Alright, we found a lock. Set the return status * and wake up the caller */ block->b_status = nlm_granted; wake_up(&block->b_wait); res = nlm_granted; } spin_unlock(&nlm_blocked_lock); return res; }


trond myklebusttrond myklebust11664.80%430.77%
bryan schumakerbryan schumaker126.70%17.69%
al viroal viro63.35%323.08%
chuck leverchuck lever52.79%323.08%
jeff laytonjeff layton10.56%17.69%

/* * The following procedures deal with the recovery of locks after a * server crash. */ /* * Reclaim all locks on server host. We do this by spawning a separate * reclaimer thread. */
void nlmclnt_recovery(struct nlm_host *host) { struct task_struct *task; if (!host->h_reclaiming++) { nlm_get_host(host); task = kthread_run(reclaimer, host, "%s-reclaim", host->h_name); if (IS_ERR(task)) printk(KERN_ERR "lockd: unable to spawn reclaimer " "thread. Locks for %s won't be reclaimed! " "(%ld)\n", host->h_name, PTR_ERR(task)); } }


jeff laytonjeff layton3246.38%116.67%
linus torvaldslinus torvalds1115.94%116.67%
trond myklebusttrond myklebust68.70%116.67%
rusty russellrusty russell68.70%116.67%

static int reclaimer(void *ptr) { struct nlm_host *host = (struct nlm_host *) ptr; struct nlm_wait *block; struct nlm_rqst *req; struct file_lock *fl, *next; u32 nsmstate; struct net *net = host->net; req = kmalloc(sizeof(*req), GFP_KERNEL); if (!req) { printk(KERN_ERR "lockd: reclaimer unable to alloc memory." " Locks for %s won't be reclaimed!\n", host->h_name); return 0; } allow_signal(SIGKILL); down_write(&host->h_rwsem); lockd_up(net); /* note: this cannot fail as lockd is already running */ dprintk("lockd: reclaiming locks for host %s\n", host->h_name); restart: nsmstate = host->h_nsmstate; /* Force a portmap getport - the peer's lockd will * most likely end up on a different port. */ host->h_nextrebind = jiffies; nlm_rebind_host(host); /* First, reclaim all locks that have been granted. */ list_splice_init(&host->h_granted, &host->h_reclaim); list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) { list_del_init(&fl->fl_u.nfs_fl.list); /* * sending this thread a SIGKILL will result in any unreclaimed * locks being removed from the h_granted list. This means that * the kernel will not attempt to reclaim them again if a new * reclaimer thread is spawned for this host. */ if (signalled()) continue; if (nlmclnt_reclaim(host, fl, req) != 0) continue; list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted); if (host->h_nsmstate != nsmstate) { /* Argh! The server rebooted again! */ goto restart; } } host->h_reclaiming = 0; up_write(&host->h_rwsem); dprintk("NLM: done reclaiming locks for host %s\n", host->h_name); /* Now, wake up all processes that sleep on a blocked lock */ spin_lock(&nlm_blocked_lock); list_for_each_entry(block, &nlm_blocked, b_list) { if (block->b_host == host) { block->b_status = nlm_lck_denied_grace_period; wake_up(&block->b_wait); } } spin_unlock(&nlm_blocked_lock); /* Release host handle after use */ nlmclnt_release_host(host); lockd_down(net); kfree(req); return 0; }


trond myklebusttrond myklebust6922.04%316.67%
olaf kircholaf kirch5417.25%211.11%
tim gardnertim gardner4614.70%15.56%
stanislav kinsburskystanislav kinsbursky154.79%15.56%
linus torvaldslinus torvalds134.15%211.11%
bryan schumakerbryan schumaker123.83%15.56%
christoph hellwigchristoph hellwig113.51%15.56%
jeff laytonjeff layton30.96%15.56%
adrian bunkadrian bunk20.64%15.56%
neil brownneil brown10.32%15.56%
chuck leverchuck lever10.32%15.56%
al viroal viro10.32%15.56%

Overall Contributors

trond myklebusttrond myklebust37235.00%921.43%
chuck leverchuck lever12912.14%819.05%
olaf kircholaf kirch545.08%24.76%
bryan schumakerbryan schumaker545.08%12.38%
tim gardnertim gardner464.33%12.38%
stanislav kinsburskystanislav kinsbursky413.86%24.76%
jeff laytonjeff layton383.57%37.14%
linus torvaldslinus torvalds292.73%24.76%
christoph hellwigchristoph hellwig111.03%12.38%
al viroal viro100.94%37.14%
rusty russellrusty russell60.56%12.38%
tejun heotejun heo30.28%12.38%
adrian bunkadrian bunk20.19%12.38%
neil brownneil brown10.09%12.38%
dave jonesdave jones10.09%12.38%
Directory: fs/lockd
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.