cregit-Linux: how code gets into the kernel

Release 4.10 fs/gfs2/quota.c

/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run after node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can sit
 * on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit.  (In practice, the maximum overrun should be much less.)  A "quota_scale"
 * number greater than one makes quota syncs more frequent and reduces the
 * maximum overrun.  Numbers less than one (but greater than zero) make quota
 * syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */
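
A quick illustration of why the syncing matters (a minimal userspace sketch,
not GFS2 code; the node count, limit, and allocation quantum are invented):
if per-node changes were never synced, every node would check allocations
against the same stale value, and cluster-wide usage could approach the limit
times the number of nodes. The periodic syncs and the "quota_scale" feedback
described above are what keep the real overrun near the 2x theoretical bound.

#include <stdio.h>

#define NODES   4	/* hypothetical cluster size */
#define LIMIT   1000L	/* hypothetical quota limit, in blocks */
#define QUANTUM 10L	/* hypothetical size of one allocation */

int main(void)
{
	long shared = 0;		/* last value synced to the quota file */
	long local[NODES] = { 0 };	/* per-node, not-yet-synced changes */
	long total = 0;
	int n;

	/* Each node keeps allocating while its stale view is under limit. */
	for (n = 0; n < NODES; n++)
		while (shared + local[n] < LIMIT)
			local[n] += QUANTUM;

	for (n = 0; n < NODES; n++)
		total += local[n];

	/* Prints: limit=1000, usage with no syncing=4000 */
	printf("limit=%ld, usage with no syncing=%ld\n", LIMIT, total);
	return 0;
}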


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"


#define GFS2_QD_HASH_SHIFT      12

#define GFS2_QD_HASH_SIZE       BIT(GFS2_QD_HASH_SHIFT)

#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)

/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/*                     -> sd_bitmap_lock                              */
static DEFINE_SPINLOCK(qd_lock);
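
The two comment lines above are the whole deadlock-avoidance contract: every
code path below takes these locks in the stated order. A minimal userspace
analogue of that discipline (pthreads; the names are stand-ins, not GFS2 code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* plays qd_lock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* plays a bucket lock */

static void *worker(void *arg)
{
	pthread_mutex_lock(&outer);	/* always first... */
	pthread_mutex_lock(&inner);	/* ...always second, so no ABBA deadlock */
	puts((const char *)arg);
	pthread_mutex_unlock(&inner);
	pthread_mutex_unlock(&outer);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)"thread a");
	pthread_create(&b, NULL, worker, (void *)"thread b");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}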

struct list_lru gfs2_qd_lru;


static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];


static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
				 const struct kqid qid)
{
	unsigned int h;

	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
	h = jhash(&qid, sizeof(struct kqid), h);

	return h & GFS2_QD_HASH_MASK;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
steven whitehouse      60      100.00%  1        100.00%
Total                  60      100.00%  1        100.00%


static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&qd_hash_table[hash]);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
steven whitehouse      20      100.00%  1        100.00%
Total                  20      100.00%  1        100.00%


static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&qd_hash_table[hash]);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
steven whitehouse      20      100.00%  1        100.00%
Total                  20      100.00%  1        100.00%


static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
	kmem_cache_free(gfs2_quotad_cachep, qd);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
steven whitehouse      33      100.00%  1        100.00%
Total                  33      100.00%  1        100.00%


static void gfs2_qd_dispose(struct list_head *list)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	while (!list_empty(list)) {
		qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
		sdp = qd->qd_gl->gl_name.ln_sbd;

		list_del(&qd->qd_lru);

		/* Free from the filesystem-specific list */
		spin_lock(&qd_lock);
		list_del(&qd->qd_list);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
	}
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
abhijith das           97      59.88%   1        14.29%
steven whitehouse      60      37.04%   3        42.86%
robert s. peterson     3       1.85%    1        14.29%
ying han               1       0.62%    1        14.29%
dave chinner           1       0.62%    1        14.29%
Total                  162     100.00%  7        100.00%


static enum lru_status gfs2_qd_isolate(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);

	if (!spin_trylock(&qd->qd_lockref.lock))
		return LRU_SKIP;

	if (qd->qd_lockref.count == 0) {
		lockref_mark_dead(&qd->qd_lockref);
		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
	}

	spin_unlock(&qd->qd_lockref.lock);
	return LRU_REMOVED;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
steven whitehouse      84      77.78%   1        33.33%
abhijith das           15      13.89%   1        33.33%
vladimir davydov       9       8.33%    1        33.33%
Total                  108     100.00%  3        100.00%


static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
				     gfs2_qd_isolate, &dispose);

	gfs2_qd_dispose(&dispose);

	return freed;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
steven whitehouse      52      81.25%   1        25.00%
dave chinner           6       9.38%    1        25.00%
abhijith das           5       7.81%    1        25.00%
vladimir davydov       1       1.56%    1        25.00%
Total                  64      100.00%  4        100.00%


static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
dave chinner           15      51.72%   1        20.00%
abhijith das           6       20.69%   1        20.00%
steven whitehouse      4       13.79%   1        20.00%
glauber costa          3       10.34%   1        20.00%
vladimir davydov       1       3.45%    1        20.00%
Total                  29      100.00%  5        100.00%

struct shrinker gfs2_qd_shrinker = {
	.count_objects = gfs2_qd_shrink_count,
	.scan_objects = gfs2_qd_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};
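
For context (a sketch of code that lives elsewhere in GFS2, not in this file):
the shrinker only takes effect once it is registered with the VM during module
init, which with the 4.10-era API looks roughly like:

	error = register_shrinker(&gfs2_qd_shrinker);
	if (error)
		goto fail;	/* unwind earlier init steps */
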
static u64 qd2index(struct gfs2_quota_data *qd)
{
	struct kqid qid = qd->qd_id;
	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
		((qid.type == USRQUOTA) ? 0 : 1);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
eric w. biederman      29      59.18%   2        40.00%
david teigland         16      32.65%   1        20.00%
steven whitehouse      2       4.08%    1        20.00%
robert s. peterson     2       4.08%    1        20.00%
Total                  49      100.00%  5        100.00%


static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = qd2index(qd);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
eric w. biederman      17      53.12%   1        50.00%
david teigland         15      46.88%   1        50.00%
Total                  32      100.00%  2        100.00%
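
Worked example of the indexing scheme above (userspace sketch; the 96-byte
record size is a stand-in, since the real value is sizeof(struct gfs2_quota)
from the on-disk format): user and group quotas interleave in the quota file,
users on even indices and groups on odd ones.

#include <stdio.h>
#include <stdint.h>

#define QUOTA_RECORD_SIZE 96	/* stand-in for sizeof(struct gfs2_quota) */

static uint64_t index_of(uint32_t id, int is_group)
{
	return 2 * (uint64_t)id + (is_group ? 1 : 0);
}

int main(void)
{
	/* UID 1000 lands at index 2000, GID 1000 right after it at 2001. */
	printf("uid 1000 -> index %llu, offset %llu\n",
	       (unsigned long long)index_of(1000, 0),
	       (unsigned long long)(index_of(1000, 0) * QUOTA_RECORD_SIZE));
	printf("gid 1000 -> index %llu, offset %llu\n",
	       (unsigned long long)index_of(1000, 1),
	       (unsigned long long)(index_of(1000, 1) * QUOTA_RECORD_SIZE));
	return 0;
}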


static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp,
					struct kqid qid)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return NULL;

	qd->qd_sbd = sdp;
	qd->qd_lockref.count = 1;
	spin_lock_init(&qd->qd_lockref.lock);
	qd->qd_id = qid;
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_lru);
	qd->qd_hash = hash;

	error = gfs2_glock_get(sdp, qd2index(qd),
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	return qd;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return NULL;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
david teigland         80      57.14%   1        11.11%
steven whitehouse      41      29.29%   4        44.44%
abhijith das           10      7.14%    1        11.11%
eric w. biederman      8       5.71%    2        22.22%
josef bacik            1       0.71%    1        11.11%
Total                  140     100.00%  9        100.00%


static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
						     const struct gfs2_sbd *sdp,
						     struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
		if (!qid_eq(qd->qd_id, qid))
			continue;
		if (qd->qd_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&qd->qd_lockref)) {
			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
			return qd;
		}
	}

	return NULL;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
steven whitehouse      93      97.89%   1        50.00%
david teigland         2       2.11%    1        50.00%
Total                  95      100.00%  2        100.00%


static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd, *new_qd;
	unsigned int hash = gfs2_qd_hash(sdp, qid);

	rcu_read_lock();
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	rcu_read_unlock();

	if (qd)
		return 0;

	new_qd = qd_alloc(hash, sdp, qid);
	if (!new_qd)
		return -ENOMEM;

	spin_lock(&qd_lock);
	spin_lock_bucket(hash);
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	if (qd == NULL) {
		*qdp = new_qd;
		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
		atomic_inc(&sdp->sd_quota_count);
	}
	spin_unlock_bucket(hash);
	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_glock_put(new_qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, new_qd);
	}

	return 0;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
david teigland         98      50.52%   1        14.29%
steven whitehouse      86      44.33%   4        57.14%
abhijith das           6       3.09%    1        14.29%
eric w. biederman      4       2.06%    1        14.29%
Total                  194     100.00%  7        100.00%


static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
	lockref_get(&qd->qd_lockref);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
david teigland         34      73.91%   1        25.00%
steven whitehouse      5       10.87%   1        25.00%
abhijith das           4       8.70%    1        25.00%
robert s. peterson     3       6.52%    1        25.00%
Total                  46      100.00%  4        100.00%


static void qd_put(struct gfs2_quota_data *qd)
{
	if (lockref_put_or_lock(&qd->qd_lockref))
		return;

	qd->qd_lockref.count = 0;
	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
	spin_unlock(&qd->qd_lockref.lock);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
david teigland         25      49.02%   1        25.00%
steven whitehouse      20      39.22%   2        50.00%
abhijith das           6       11.76%   1        25.00%
Total                  51      100.00%  4        100.00%


static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	unsigned int bit;
	int error = 0;

	spin_lock(&sdp->sd_bitmap_lock);
	if (qd->qd_slot_count != 0)
		goto out;

	error = -ENOSPC;
	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
	if (bit < sdp->sd_quota_slots) {
		set_bit(bit, sdp->sd_quota_bitmap);
		qd->qd_slot = bit;
		error = 0;
out:
		qd->qd_slot_count++;
	}
	spin_unlock(&sdp->sd_bitmap_lock);

	return error;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
david teigland         67      59.29%   1        25.00%
steven whitehouse      42      37.17%   2        50.00%
abhijith das           4       3.54%    1        25.00%
Total                  113     100.00%  4        100.00%
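
The bitmap dance above is the classic first-fit slot allocator. A userspace
sketch of the same pattern, minus the locking (illustrative only; the helper
stands in for the kernel's find_first_zero_bit()):

#include <stdio.h>
#include <limits.h>

#define SLOTS 64
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned int find_first_zero(const unsigned long *map, unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits; i++)
		if (!(map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
			return i;
	return nbits;	/* no free bit: the caller sees -ENOSPC */
}

int main(void)
{
	unsigned long bitmap[SLOTS / BITS_PER_LONG + 1] = { 0 };
	unsigned int bit;

	bitmap[0] = 0x7UL;	/* pretend slots 0-2 are already taken */

	bit = find_first_zero(bitmap, SLOTS);
	if (bit < SLOTS) {
		bitmap[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
		printf("claimed slot %u\n", bit);	/* prints: claimed slot 3 */
	}
	return 0;
}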


static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_bitmap_lock);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
david teigland         43      86.00%   1        33.33%
steven whitehouse      7       14.00%   2        66.67%
Total                  50      100.00%  3        100.00%


static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_bitmap_lock);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
david teigland         62      80.52%   1        33.33%
steven whitehouse      15      19.48%   2        66.67%
Total                  77      100.00%  3        100.00%


static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = BIT(ip->i_inode.i_blkbits);
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, 0, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
david teigland         218     81.34%   1        9.09%
steven whitehouse      39      14.55%   5        45.45%
robert s. peterson     5       1.87%    2        18.18%
fabian frederick       3       1.12%    1        9.09%
andreas gruenbacher    2       0.75%    1        9.09%
josef whiter           1       0.37%    1        9.09%
Total                  268     100.00%  11       100.00%
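
The slot-to-disk-location math in bh_get() is plain integer division. With,
say, 96 quota-change records per block (a made-up value; sd_qc_per_block is
derived from the filesystem block size), slot 200 lives in block 2, record 8:

#include <stdio.h>

int main(void)
{
	unsigned int qc_per_block = 96;	/* hypothetical sd_qc_per_block */
	unsigned int slot = 200;

	/* Prints: slot 200 -> block 2, record 8 */
	printf("slot %u -> block %u, record %u\n",
	       slot, slot / qc_per_block, slot % qc_per_block);
	return 0;
}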


static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
david teigland         73      93.59%   1        33.33%
robert s. peterson     3       3.85%    1        33.33%
steven whitehouse      2       2.56%    1        33.33%
Total                  78      100.00%  3        100.00%


static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
			 u64 *sync_gen)
{
	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
		return 0;

	if (!lockref_get_not_dead(&qd->qd_lockref))
		return 0;

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
	set_bit(QDF_LOCKED, &qd->qd_flags);
	qd->qd_change_sync = qd->qd_change;
	slot_hold(qd);
	return 1;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
david teigland         77      68.75%   1        25.00%
steven whitehouse      35      31.25%   3        75.00%
Total                  112     100.00%  4        100.00%


static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
		if (found)
			break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
steven whitehouse      80      50.31%   2        50.00%
david teigland         77      48.43%   1        25.00%
abhijith das           2       1.26%    1        25.00%
Total                  159     100.00%  4        100.00%


static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
david teigland         54      94.74%   1        50.00%
robert s. peterson     3       5.26%    1        50.00%
Total                  57      100.00%  2        100.00%


static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, qid, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
david teigland         90      95.74%   1        50.00%
eric w. biederman      4       4.26%    1        50.00%
Total                  94      100.00%  2        100.00%


static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
david teigland         26      100.00%  1        100.00%
Total                  26      100.00%  1        100.00%
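
qdsb_get()/qdsb_put() above follow the kernel's usual acquire-in-order,
unwind-in-reverse error handling. A userspace sketch of that idiom
(illustrative; the resources and the failure are made up):

#include <stdio.h>

static int get_a(void) { return 0; }	/* succeeds */
static int get_b(void) { return -1; }	/* pretend this fails */
static void put_a(void) { puts("put a"); }

static int get_both(void)
{
	int error;

	error = get_a();
	if (error)
		return error;

	error = get_b();
	if (error)
		goto fail_a;	/* release only what is already held */

	return 0;

fail_a:
	put_a();
	return error;
}

int main(void)
{
	printf("get_both() = %d\n", get_both());	/* prints "put a", then -1 */
	return 0;
}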

/**
 * gfs2_qa_alloc - make sure we have a quota allocation data structure,
 *                 if necessary
 * @ip: the inode for this reservation
 */
int gfs2_qa_alloc(struct gfs2_inode *ip)
{
	int error = 0;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	down_write(&ip->i_rw_mutex);
	if (ip->i_qadata == NULL) {
		ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
		if (!ip->i_qadata)
			error = -ENOMEM;
	}
	up_write(&ip->i_rw_mutex);
	return error;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
robert s. peterson     93      100.00%  1        100.00%
Total                  93      100.00%  1        100.00%


void gfs2_qa_delete(struct gfs2_inode *ip, atomic_t *wcount)
{
	down_write(&ip->i_rw_mutex);
	if (ip->i_qadata && ((wcount == NULL) || (atomic_read(wcount) <= 1))) {
		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
		ip->i_qadata = NULL;
	}
	up_write(&ip->i_rw_mutex);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
robert s. peterson     70      100.00%  2        100.00%
Total                  70      100.00%  2        100.00%


int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	if (ip->i_qadata == NULL) {
		error = gfs2_rsqa_alloc(ip);
		if (error)
			return error;
	}

	qd = ip->i_qadata->qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
	if (error)
		goto out;
	ip->i_qadata->qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
	if (error)
		goto out;
	ip->i_qadata->qa_qd_num++;
	qd++;

	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
	    !uid_eq(uid, ip->i_inode.i_uid)) {
		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
		if (error)
			goto out;
		ip->i_qadata->qa_qd_num++;
		qd++;
	}

	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
	    !gid_eq(gid, ip->i_inode.i_gid)) {
		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
		if (error)
			goto out;
		ip->i_qadata->qa_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
david teigland         200     64.31%   1        10.00%
robert s. peterson     51      16.40%   2        20.00%
eric w. biederman      36      11.58%   4        40.00%
steven whitehouse      13      4.18%    2        20.00%
andrew price           11      3.54%    1        10.00%
Total                  311     100.00%  10       100.00%


void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u32 x;

	if (ip->i_qadata == NULL)
		return;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qdsb_put(ip->i_qadata->qa_qd[x]);
		ip->i_qadata->qa_qd[x] = NULL;
	}
	ip->i_qadata->qa_qd_num = 0;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
david teigland         71      70.30%   1        20.00%
robert s. peterson     25      24.75%   3        60.00%
steven whitehouse      5       4.95%    1        20.00%
Total                  101     100.00%  5        100.00%


static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
		return -1;
	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
		return 1;
	return 0;
}