Release 4.10 fs/nfsd/nfs4state.c
/*
* Copyright (c) 2001 The Regents of the University of Michigan.
* All rights reserved.
*
* Kendrick Smith <kmsmith@umich.edu>
* Andy Adamson <kandros@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"
#include "netns.h"
#include "pnfs.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
.si_generation = ~0,
.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
/* all fields zero */
};
static const stateid_t currentstateid = {
.si_generation = 1,
};
static u64 current_sessionid = 1;
#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
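/*
* Background on the three constants above: the all-zero and all-ones
* stateids are NFSv4's two "special" stateids (RFC 7530), used for
* anonymous I/O and READ bypass respectively, while currentstateid
* backs the NFSv4.1 "current stateid" machinery (RFC 5661) hooked in
* via current_stateid.h. These macros are how this file tests for them.
*/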
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
/* Locking: */
/*
* Currently used for the del_recall_lru and file hash table. In an
* effort to decrease the scope of the client_mutex, this spinlock may
* eventually cover more:
*/
static DEFINE_SPINLOCK(state_lock);
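/*
* A minimal sketch of the pattern this lock protects (the hash insert
* itself happens later in the full file; shown here for orientation,
* not verbatim):
*
*	spin_lock(&state_lock);
*	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
*	spin_unlock(&state_lock);
*
* Removal pairs with this in put_nfs4_file() below via hlist_del_rcu().
*/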
/*
* A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
* the refcount on the open stateid to drop.
*/
static DECLARE_WAIT_QUEUE_HEAD(close_wq);
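/*
* Sketch of the waiter/waker pairing this queue supports (the CLOSE-side
* wait lives later in the full file; the condition below is illustrative,
* not verbatim):
*
*	// waiter: a 4.0 CLOSE blocking until other references drop
*	wait_event(close_wq, atomic_read(&stp->st_stid.sc_count) == 2);
*
*	// waker: run after putting a reference to the open stateid
*	wake_up(&close_wq);
*/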
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;
static void free_session(struct nfsd4_session *);
static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
static bool is_session_dead(struct nfsd4_session *ses)
{
return ses->se_flags & NFS4_SESSION_DEAD;
}
static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
if (atomic_read(&ses->se_ref) > ref_held_by_me)
return nfserr_jukebox;
ses->se_flags |= NFS4_SESSION_DEAD;
return nfs_ok;
}
static bool is_client_expired(struct nfs4_client *clp)
{
return clp->cl_time == 0;
}
static __be32 get_client_locked(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
if (is_client_expired(clp))
return nfserr_expired;
atomic_inc(&clp->cl_refcount);
return nfs_ok;
}
/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
if (is_client_expired(clp)) {
WARN_ON(1);
printk("%s: client (clientid %08x/%08x) already expired\n",
__func__,
clp->cl_clientid.cl_boot,
clp->cl_clientid.cl_id);
return;
}
dprintk("renewing client (clientid %08x/%08x)\n",
clp->cl_clientid.cl_boot,
clp->cl_clientid.cl_id);
list_move_tail(&clp->cl_lru, &nn->client_lru);
clp->cl_time = get_seconds();
}
static void put_client_renew_locked(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
if (!atomic_dec_and_test(&clp->cl_refcount))
return;
if (!is_client_expired(clp))
renew_client_locked(clp);
}
static void put_client_renew(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
return;
if (!is_client_expired(clp))
renew_client_locked(clp);
spin_unlock(&nn->client_lock);
}
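/*
* Note on the two helpers above: atomic_dec_and_lock() decrements the
* refcount and returns true, with the spinlock held, only when the
* count reaches zero. put_client_renew() therefore takes nn->client_lock
* only for the final put, which is why its unconditional spin_unlock()
* at the end is correct, while put_client_renew_locked() expects the
* caller to hold the lock already.
*/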
static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
__be32 status;
if (is_session_dead(ses))
return nfserr_badsession;
status = get_client_locked(ses->se_client);
if (status)
return status;
atomic_inc(&ses->se_ref);
return nfs_ok;
}
static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
struct nfs4_client *clp = ses->se_client;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
free_session(ses);
put_client_renew_locked(clp);
}
static void nfsd4_put_session(struct nfsd4_session *ses)
{
struct nfs4_client *clp = ses->se_client;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
spin_lock(&nn->client_lock);
nfsd4_put_session_locked(ses);
spin_unlock(&nn->client_lock);
}
static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
struct nfsd_net *nn)
{
struct nfsd4_blocked_lock *cur, *found = NULL;
spin_lock(&nn->blocked_locks_lock);
list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
if (fh_match(fh, &cur->nbl_fh)) {
list_del_init(&cur->nbl_list);
list_del_init(&cur->nbl_lru);
found = cur;
break;
}
}
spin_unlock(&nn->blocked_locks_lock);
if (found)
posix_unblock_lock(&found->nbl_lock);
return found;
}
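/*
* posix_unblock_lock() above detaches the still-pending file_lock from
* the VFS blocked-lock machinery, so no lock-manager notification
* (lm_notify) can race with us once the nfsd4_blocked_lock has been
* taken off the owner's lists.
*/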
static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
struct nfsd_net *nn)
{
struct nfsd4_blocked_lock *nbl;
nbl = find_blocked_lock(lo, fh, nn);
if (!nbl) {
nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
if (nbl) {
fh_copy_shallow(&nbl->nbl_fh, fh);
locks_init_lock(&nbl->nbl_lock);
nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
&nfsd4_cb_notify_lock_ops,
NFSPROC4_CLNT_CB_NOTIFY_LOCK);
}
}
return nbl;
}
static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
locks_release_private(&nbl->nbl_lock);
kfree(nbl);
}
static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
/*
* Since this is just an optimization, we don't try very hard if it
* turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
* just quit trying on anything else.
*/
switch (task->tk_status) {
case -NFS4ERR_DELAY:
rpc_delay(task, 1 * HZ);
return 0;
default:
return 1;
}
}
static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
struct nfsd4_blocked_lock *nbl = container_of(cb,
struct nfsd4_blocked_lock, nbl_cb);
free_blocked_lock(nbl);
}
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
.done = nfsd4_cb_notify_lock_done,
.release = nfsd4_cb_notify_lock_release,
};
static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
atomic_inc(&sop->so_count);
return sop;
}
static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
return (sop->so_owner.len == owner->len) &&
0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}
static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
struct nfs4_client *clp)
{
struct nfs4_stateowner *so;
lockdep_assert_held(&clp->cl_lock);
list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
so_strhash) {
if (!so->so_is_open_owner)
continue;
if (same_owner_str(so, &open->op_owner))
return openowner(nfs4_get_stateowner(so));
}
return NULL;
}
static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
struct nfs4_client *clp)
{
struct nfs4_openowner *oo;
spin_lock(&clp->cl_lock);
oo = find_openstateowner_str_locked(hashval, open, clp);
spin_unlock(&clp->cl_lock);
return oo;
}
static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
unsigned char *cptr = (unsigned char *) ptr;
u32 x = 0;
while (nbytes--) {
x *= 37;
x += *cptr++;
}
return x;
}
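/*
* Worked example for opaque_hashval() above: opaque_hashval("ab", 2)
* evaluates (0 * 37 + 'a') * 37 + 'b' = 97 * 37 + 98 = 3687; callers
* such as ownerstr_hashval() below then mask the result down to a
* bucket index.
*/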
static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
kmem_cache_free(file_slab, fp);
}
void
put_nfs4_file(struct nfs4_file *fi)
{
might_lock(&state_lock);
if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
hlist_del_rcu(&fi->fi_hash);
spin_unlock(&state_lock);
WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
}
}
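/*
* The lookup side that pairs with the hlist_del_rcu()/call_rcu() in
* put_nfs4_file() appears later in the full file; roughly (sketch, not
* verbatim):
*
*	rcu_read_lock();
*	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
*		if (fh_match(&fp->fi_fhandle, fh) &&
*		    atomic_inc_not_zero(&fp->fi_ref))
*			goto found;
*	}
*	rcu_read_unlock();
*
* atomic_inc_not_zero() is what makes the RCU walk safe against this
* final put: a file whose refcount already hit zero is never revived.
*/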
static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
if (f->fi_fds[oflag])
return get_file(f->fi_fds[oflag]);
return NULL;
}
static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
struct file *ret;
lockdep_assert_held(&f->fi_lock);
ret = __nfs4_get_fd(f, O_WRONLY);
if (!ret)
ret = __nfs4_get_fd(f, O_RDWR);
return ret;
}
static struct file *
find_writeable_file(struct nfs4_file *f)
{
struct file *ret;
spin_lock(&f->fi_lock);
ret = find_writeable_file_locked(f);
spin_unlock(&f->fi_lock);
return ret;
}
static struct file *find_readable_file_locked(struct nfs4_file *f)
{
struct file *ret;
lockdep_assert_held(&f->fi_lock);
ret = __nfs4_get_fd(f, O_RDONLY);
if (!ret)
ret = __nfs4_get_fd(f, O_RDWR);
return ret;
}
static struct file *
find_readable_file(struct nfs4_file *f)
{
struct file *ret;
spin_lock(&f->fi_lock);
ret = find_readable_file_locked(f);
spin_unlock(&f->fi_lock);
return ret;
}
struct file *
find_any_file(struct nfs4_file *f)
{
struct file *ret;
spin_lock(&f->fi_lock);
ret = __nfs4_get_fd(f, O_RDWR);
if (!ret) {
ret = __nfs4_get_fd(f, O_WRONLY);
if (!ret)
ret = __nfs4_get_fd(f, O_RDONLY);
}
spin_unlock(&f->fi_lock);
return ret;
}
static atomic_long_t num_delegations;
unsigned long max_delegations;
/*
* Open owner state (share locks)
*/
/* hash tables for lock and open owners */
#define OWNER_HASH_BITS 8
#define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
unsigned int ret;
ret = opaque_hashval(ownername->data, ownername->len);
return ret & OWNER_HASH_MASK;
}
/* hash table for nfs4_file */
#define FILE_HASH_BITS 8
#define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}
static unsigned int file_hashval(struct knfsd_fh *fh)
{
return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}
static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
lockdep_assert_held(&fp->fi_lock);
if (access & NFS4_SHARE_ACCESS_WRITE)
atomic_inc(&fp->fi_access[O_WRONLY]);
if (access & NFS4_SHARE_ACCESS_READ)
atomic_inc(&fp->fi_access[O_RDONLY]);
}
static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
lockdep_assert_held(&fp->fi_lock);
/* Does this access mode make sense? */
if (access & ~NFS4_SHARE_ACCESS_BOTH)
return nfserr_inval;
/* Does it conflict with a deny mode already set? */
if ((access & fp->fi_share_deny) != 0)
return nfserr_share_denied;
__nfs4_file_get_access(fp, access);
return nfs_ok;
}
static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
/* Common case is that there is no deny mode. */
if (deny) {
/* Does this deny mode make sense? */
if (deny & ~NFS4_SHARE_DENY_BOTH)
return nfserr_inval;
if ((deny & NFS4_SHARE_DENY_READ) &&
atomic_read(&fp->fi_access[O_RDONLY]))
return nfserr_share_denied;
if ((deny & NFS4_SHARE_DENY_WRITE) &&
atomic_read(&fp->fi_access[O_WRONLY]))
return nfserr_share_denied;
}
return nfs_ok;
}
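/*
* Example of the share-reservation semantics implemented above: if an
* existing open holds NFS4_SHARE_ACCESS_WRITE, fi_access[O_WRONLY] is
* nonzero, so a later OPEN asking for NFS4_SHARE_DENY_WRITE fails here
* with nfserr_share_denied. The access check in nfs4_file_get_access()
* and this deny check are two halves of the same conflict test.
*/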
static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
might_lock(&fp->fi_lock);
if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
struct file *f1 = NULL;
struct file *f2 = NULL;
swap(f1, fp->fi_fds[oflag]);
if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
swap(f2, fp->fi_fds[O_RDWR]);
spin_unlock(&fp->fi_lock);
if (f1)
fput(f1);
if (f2)
fput(f2);
}
}
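/*
* Note the swap()-then-fput() dance in __nfs4_file_put_access() above:
* the struct file pointers are detached while fi_lock is held, but
* fput() runs only after the lock is dropped, keeping any final-release
* work on the files out of the locked section.
*/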
static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
if (access & NFS4_SHARE_ACCESS_WRITE)
__nfs4_file_put_access(fp, O_WRONLY);
if (access & NFS4_SHARE_ACCESS_READ)
__nfs4_file_put_access(fp, O_RDONLY);
}
/*
* Allocate a new open/delegation state counter. This is needed for
* pNFS for proper return on close semantics.
*
* Note that we only allocate it for pNFS-enabled exports, otherwise
* all pointers to struct nfs4_clnt_odstate are always NULL.
*/
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
struct nfs4_clnt_odstate *co;
co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
if (co) {
co->co_client = clp;
atomic_set(&co->co_odcount, 1);
}
return co;
}
static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
struct nfs4_file *fp = co->co_file;
lockdep_assert_held(&fp->fi_lock);
list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}
static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
if (co)
atomic_inc(&co->co_odcount);
}
static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
struct nfs4_file *fp;
if (!co)
return;
fp = co->co_file;
if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
list_del(&co->co_perfile);
spin_unlock(&fp->fi_lock);
nfsd4_return_all_file_layouts(co->co_client, fp);
kmem_cache_free(odstate_slab, co);
}
}
static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
struct nfs4_clnt_odstate *co;
struct nfs4_client *cl;
if (!new)
return NULL;
cl = new->co_client;
spin_lock(&fp->fi_lock);
list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
if (co->co_client == cl) {
get_clnt_odstate(co);
goto out;
}
}
co = new;
co->co_file = fp;
hash_clnt_odstate_locked(new);
out:
spin_unlock(&fp->fi_lock);
return co;
}
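/*
* Typical caller pattern (sketch only; the real OPEN-path call site is
* later in the full file): preallocate with alloc_clnt_odstate(), then
* let find_or_hash_clnt_odstate() either hash the new entry or return
* an existing one with an extra reference, in which case the caller
* frees the unused, never-hashed preallocation:
*
*	co = find_or_hash_clnt_odstate(fp, new);
*	if (co != new)
*		kmem_cache_free(odstate_slab, new);
*/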
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
void (*sc_free)(struct nfs4_stid *))
{
struct nfs4_stid *stid;
int new_id;
stid = kmem_cache_zalloc(slab, GFP_KERNEL);
if (!stid)
return NULL;
idr_preload(GFP_KERNEL);
spin_lock(&cl->cl_lock);
new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
spin_unlock(&cl->cl_lock);
idr_preload_end();
if (new_id < 0)
goto out_free;
stid->sc_free = sc_free;
stid->sc_client = cl;
stid->sc_stateid.si_opaque.so_id = new_id;
stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
/* Will be incremented before return to client: */
atomic_set(&stid->sc_count, 1);
spin_lock_init(&stid->sc_lock);
/*
* It shouldn't be a problem to reuse an opaque stateid value.
* I don't think it is for 4.1. But with 4.0 I worry that, for
* example, a stray write retransmission could be accepted by
* the server when it should have been rejected. Therefore,
* adopt a trick from the sctp code to attempt to maximize the
* amount of time until an id is reused, by ensuring they always
* "increase" (mod INT_MAX):
*/
return stid;
out_free:
kmem_cache_free(slab, stid);
return NULL;
}
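/*
* idr_alloc_cyclic() provides the "always increase" behaviour the
* comment above refers to: each call starts searching just past the
* most recently allocated id and wraps to the low end of the id space
* only when the high end is exhausted, maximizing the time before a
* freed so_id is handed out again.
*/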
static struct nfs4_ol_stateid *nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
struct nfs4_stid *stid;
stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
if (!stid)
return NULL;
return openlockstateid(stid);
}
static void nfs4_free_deleg(struct nfs4_stid *stid)
{
kmem_cache_free(deleg_slab, stid);
atomic_long_dec(&num_delegations);
}
/*
* When we recall a delegation, we should be careful not to hand it
* ou