cregit-Linux how code gets into the kernel

Release 4.10 fs/gfs2/glock.c

Directory: fs/gfs2
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"

#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"


/*
 * Iterator state for walking every glock in the hash table,
 * used by the debugfs/seq_file glock dump code.
 */
struct gfs2_glock_iter {
	struct gfs2_sbd *sdp;		/* incore superblock           */
	struct rhashtable_iter hti;	/* rhashtable iterator         */
	struct gfs2_glock *gl;		/* current glock struct        */
	loff_t last_pos;		/* last position               */
};


typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);


static struct dentry *gfs2_root;

static struct workqueue_struct *glock_workqueue;

struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);

static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);


#define GFS2_GL_HASH_SHIFT      15

#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)


/*
 * Parameters for the global glock hash table: glocks are keyed by
 * their lm_lockname and linked through gl_node.
 */
static struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = sizeof(struct lm_lockname),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};


static struct rhashtable gl_hash_table;


void gfs2_glock_free(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; if (gl->gl_ops->go_flags & GLOF_ASPACE) { kmem_cache_free(gfs2_glock_aspace_cachep, gl); } else { kfree(gl->gl_lksb.sb_lvbptr); kmem_cache_free(gfs2_glock_cachep, gl); } if (atomic_dec_and_test(&sdp->sd_glock_disposal)) wake_up(&sdp->sd_glock_wait); }

Contributors

PersonTokensPropCommitsCommitProp
steven whitehousesteven whitehouse4558.44%450.00%
david teiglanddavid teigland2228.57%337.50%
robert s. petersonrobert s. peterson1012.99%112.50%
Total77100.00%8100.00%

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 * It is a bug to take a reference on a glock whose lockref has
 * already been marked dead.
 */
static void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
}

Contributors

PersonTokensPropCommitsCommitProp
steven whitehousesteven whitehouse1650.00%266.67%
david teiglanddavid teigland1650.00%133.33%
Total32100.00%3100.00%

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */
static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	/* Nothing to demote if already unlocked or still held */
	if (gl->gl_state == LM_ST_UNLOCKED || !list_empty(&gl->gl_holders))
		return 0;

	/* Let the glock type veto the demote if it provides a hook */
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);

	return 1;
}

Contributors

PersonTokensPropCommitsCommitProp
benjamin marzinskibenjamin marzinski64100.00%1100.00%
Total64100.00%1100.00%


void gfs2_glock_add_to_lru(struct gfs2_glock *gl) { spin_lock(&lru_lock); if (!list_empty(&gl->gl_lru)) list_del_init(&gl->gl_lru); else atomic_inc(&lru_count); list_add_tail(&gl->gl_lru, &lru_list); set_bit(GLF_LRU, &gl->gl_flags); spin_unlock(&lru_lock); }

Contributors

PersonTokensPropCommitsCommitProp
steven whitehousesteven whitehouse69100.00%4100.00%
Total69100.00%4100.00%


static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) { spin_lock(&lru_lock); if (!list_empty(&gl->gl_lru)) { list_del_init(&gl->gl_lru); atomic_dec(&lru_count); clear_bit(GLF_LRU, &gl->gl_flags); } spin_unlock(&lru_lock); }

Contributors

PersonTokensPropCommitsCommitProp
steven whitehousesteven whitehouse5388.33%375.00%
robert s. petersonrobert s. peterson711.67%125.00%
Total60100.00%4100.00%

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * On the last reference the glock is marked dead, removed from the
 * LRU and the hash table, and handed back to the lock module via
 * lm_put_lock() (which is expected to free it eventually).
 */
void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	/* Fast path: not the last reference — just drop it and return. */
	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	/* Last reference: lockref_put_or_lock() left gl_lockref.lock held. */
	lockref_mark_dead(&gl->gl_lockref);

	gfs2_glock_remove_from_lru(gl);
	spin_unlock(&gl->gl_lockref.lock);
	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
	/* A dying glock must have no holders and no cached pages. */
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

Contributors

PersonTokensPropCommitsCommitProp
steven whitehousesteven whitehouse9075.63%1066.67%
david teiglanddavid teigland1613.45%16.67%
robert s. petersonrobert s. peterson119.24%320.00%
benjamin marzinskibenjamin marzinski21.68%16.67%
Total119100.00%15100.00%

/** * may_grant - check if its ok to grant a new lock * @gl: The glock * @gh: The lock request which we wish to grant * * Returns: true if its ok to grant the lock */
static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh) { const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list); if ((gh->gh_state == LM_ST_EXCLUSIVE || gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head) return 0; if (gl->gl_state == gh->gh_state) return 1; if (gh->gh_flags & GL_EXACT) return 0; if (gl->gl_state == LM_ST_EXCLUSIVE) { if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED) return 1; if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED) return 1; } if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY)) return 1; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
steven whitehousesteven whitehouse12783.01%266.67%
benjamin marzinskibenjamin marzinski2616.99%133.33%
Total153100.00%3100.00%


static void gfs2_holder_wake(struct gfs2_holder *gh) { clear_bit(HIF_WAIT, &gh->gh_iflags); smp_mb__after_atomic(); wake_up_bit(&gh->gh_iflags, HIF_WAIT); }

Contributors

PersonTokensPropCommitsCommitProp
steven whitehousesteven whitehouse3397.06%266.67%
peter zijlstrapeter zijlstra12.94%133.33%
Total34100.00%3100.00%

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: Status from the DLM (LM_OUT_ERROR fails everything waiting;
 *       otherwise only "try lock" requests are failed)
 */
static void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *next;

	list_for_each_entry_safe(gh, next, &gl->gl_holders, gh_list) {
		/* Granted holders are untouched; only waiters are failed */
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
steven whitehousesteven whitehouse102100.00%1100.00%
Total102100.00%1100.00%

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Must be called with gl_lockref.lock held; may drop and reacquire it
 * around the type-specific go_lock() callback.
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 * if a type specific operation is underway.
 */
static int do_promote(struct gfs2_glock *gl) __releases(&gl->gl_lockref.lock

Contributors

PersonTokensPropCommitsCommitProp
steven whitehousesteven whitehouse541.67%125.00%
david teiglanddavid teigland433.33%125.00%
harvey harrisonharvey harrison216.67%125.00%
andreas gruenbacherandreas gruenbacher18.33%125.00%
Total12100.00%4100.00%

) __acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		/* Skip entries that already hold the lock */
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			/*
			 * First waiter in the queue: run the type-specific
			 * go_lock() callback, dropping the spinlock around it.
			 */
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_lockref.lock);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_lockref.lock);
				if (ret) {
					if (ret == 1)
						return 2;
					/* go_lock() failed: fail this holder and rescan */
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			/* Not the queue head: grant without the callback */
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		/* Cannot grant: blocked at the head of the queue? */
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */
static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) { struct gfs2_holder *gh; list_for_each_entry(gh, &gl->gl_holders, gh_list) { if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) return gh; } return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
steven whitehousesteven whitehouse3163.27%133.33%
david teiglanddavid teigland1632.65%133.33%
pavel emelianovpavel emelianov24.08%133.33%
Total49100.00%3100.00%

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 */
static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	/*
	 * A glock in any state other than UNLOCKED pins a reference;
	 * adjust the lockref count when crossing that boundary.
	 */
	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
		if (held2)
			gl->gl_lockref.count++;
		else
			gl->gl_lockref.count--;
	}
	/* Held-to-held transition with no holders: no longer "queued" */
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);
	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

Contributors

PersonTokensPropCommitsCommitProp
steven whitehousesteven whitehouse11281.16%466.67%
robert s. petersonrobert s. peterson2417.39%116.67%
abhijith dasabhijith das21.45%116.67%
Total138100.00%6100.00%


static void gfs2_demote_wake(struct gfs2_glock *gl) { gl->gl_demote_state = LM_ST_EXCLUSIVE; clear_bit(GLF_DEMOTE, &gl->gl_flags); smp_mb__after_atomic(); wake_up_bit(&gl->gl_flags, GLF_DEMOTE); }

Contributors

PersonTokensPropCommitsCommitProp
abhijith dasabhijith das3382.50%133.33%
benjamin marzinskibenjamin marzinski615.00%133.33%
peter zijlstrapeter zijlstra12.50%133.33%
Total40100.00%3100.00%

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM (low bits carry the resulting lock state)
 *
 * Records the new state and either retries (cancel/conversion-deadlock
 * cases), reports an error to waiting holders, or promotes whatever can
 * now be granted.
 */
static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_lockref.lock);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			pr_err("wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_lockref.lock);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			/* go_xmote_bh() runs without the spinlock */
			spin_unlock(&gl->gl_lockref.lock);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_lockref.lock);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_lockref.lock);
}

Contributors

PersonTokensPropCommitsCommitProp
steven whitehousesteven whitehouse29169.78%550.00%
david teiglanddavid teigland9823.50%110.00%
andreas gruenbacherandreas gruenbacher153.60%110.00%
abhijith dasabhijith das112.64%110.00%
joe perchesjoe perches10.24%110.00%
fabian frederickfabian frederick10.24%110.00%
Total417100.00%10100.00%

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 * Called with gl_lockref.lock held; the lock is dropped while syncing,
 * invalidating and calling into the lock module, and reacquired before
 * returning.
 */
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) __releases(&gl->gl_lockref.lock

Contributors

PersonTokensPropCommitsCommitProp
steven whitehousesteven whitehouse1885.71%133.33%
harvey harrisonharvey harrison29.52%133.33%
andreas gruenbacherandreas gruenbacher14.76%133.33%
Total21100.00%3100.00%

) __acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
	int ret;

	/* Only these holder flags are passed through to the lock module */
	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	/* Downconversions require invalidation of cached data */
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);
	if (glops->go_sync)
		glops->go_sync(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	/* Reference held for the pending reply / queued work */
	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
		    target == LM_ST_UNLOCKED &&
		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
			/* DLM unlock skipped (e.g. withdraw): complete locally */
			finish_xmote(gl, target);
			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
				gfs2_glock_put(gl);
		} else if (ret) {
			pr_err("lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, 1);
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_lockref.lock);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */
static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) { struct gfs2_holder *gh; if (!list_empty(&gl->gl_holders)) { gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); if (test_bit(HIF_HOLDER, &gh->gh_iflags)) return gh; } return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
steven whitehousesteven whitehouse3856.72%266.67%
david teiglanddavid teigland2943.28%133.33%
Total67100.00%3100.00%

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 * Called with gl_lockref.lock held.  Takes the GLF_LOCK "workqueue"
 * bit; if another task already holds it, this returns immediately.
 */
static void run_queue(struct gfs2_glock *gl, const int nonblock) __releases(&gl->gl_lockref.lock

Contributors

PersonTokensPropCommitsCommitProp
david teiglanddavid teigland743.75%120.00%
steven whitehousesteven whitehouse637.50%240.00%
harvey harrisonharvey harrison212.50%120.00%
andreas gruenbacherandreas gruenbacher16.25%120.00%
Total16100.00%5100.00%

) __acquires(&gl->gl_lockref.lock)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		/* Demote requested: cannot proceed while something holds the lock */
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	/* Defer to the glock workqueue, keeping a reference for it */
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	gl->gl_lockref.count++;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gl->gl_lockref.count--;
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	return;
}
static void delete_work_func(struct work_struct *work) { struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct inode *inode; u64 no_addr = gl->gl_name.ln_number; /* If someone's using this glock to create a new dinode, the block must have been freed by another node, then re-used, in which case our iopen callback is too late after the fact. Ignore it. */ if (test_bit(GLF_INODE_CREATING, &gl->gl_flags)) goto out; inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED); if (inode && !IS_ERR(inode)) { d_prune_aliases(inode); iput(inode); } out: gfs2_glock_put(gl); }

Contributors

PersonTokensPropCommitsCommitProp
benjamin marzinskibenjamin marzinski7366.97%125.00%
robert s. petersonrobert s. peterson2119.27%250.00%
steven whitehousesteven whitehouse1513.76%125.00%
Total109100.00%4100.00%


static void glock_work_func(struct work_struct *work) { unsigned long delay = 0; struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); int drop_ref = 0; if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { finish_xmote(gl, gl->gl_reply); drop_ref = 1; } spin_lock(&gl->gl_lockref.lock); if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && gl->gl_state != LM_ST_UNLOCKED && gl->gl_demote_state != LM_ST_EXCLUSIVE) { unsigned long holdtime, now = jiffies; holdtime = gl->gl_tchange + gl->gl_hold_time; if (time_before(now, holdtime)) delay = holdtime - now; if (!delay) { clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); set_bit(GLF_DEMOTE, &gl->gl_flags); } } run_queue(gl, 0); spin_unlock(&gl->gl_lockref.lock); if (!delay) gfs2_glock_put(gl); else { if (gl->gl_name.ln_type != LM_TYPE_INODE) delay = 0; if