/* Release 4.10 fs/gfs2/quota.c */
/*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
* Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License version 2.
*/
/*
* Quota change tags are associated with each transaction that allocates or
* deallocates space. Those changes are accumulated locally to each node (in a
* per-node file) and then are periodically synced to the quota file. This
* avoids the bottleneck of constantly touching the quota file, but introduces
* fuzziness in the current usage value of IDs that are being used on different
* nodes in the cluster simultaneously. So, it is possible for a user on
* multiple nodes to overrun their quota, but that overrun is controllable.
* Since quota tags are part of transactions, there is no need for a quota check
* program to be run on node crashes or anything like that.
*
* There are a couple of knobs that let the administrator manage the quota
* fuzziness. "quota_quantum" sets the maximum time a quota change can be
* sitting on one node before being synced to the quota file. (The default is
* 60 seconds.) Another knob, "quota_scale" controls how quickly the frequency
* of quota file syncs increases as the user moves closer to their limit. The
* more frequent the syncs, the more accurate the quota enforcement, but that
* means that there is more contention between the nodes for the quota file.
* The default value is one. This sets the maximum theoretical quota overrun
* (with an infinite number of nodes and infinite bandwidth) to twice the
* user's limit. (In
* practice, the maximum overrun you see should be much less.) A "quota_scale"
* number greater than one makes quota syncs more frequent and reduces the
* maximum overrun. Numbers less than one (but greater than zero) make quota
* syncs less frequent.
*
* GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
* the quota file, so it is not being constantly read.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>
#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"
#define GFS2_QD_HASH_SHIFT 12
#define GFS2_QD_HASH_SIZE BIT(GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1)
/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/* -> sd_bitmap_lock */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;
static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
/*
 * Hash a (superblock, quota id) pair into the global qd_hash_table.
 * The sdp pointer value is folded in together with the kqid so that
 * identical ids on different filesystems land in different chains.
 */
static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
				 const struct kqid qid)
{
	unsigned int h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);

	return jhash(&qid, sizeof(struct kqid), h) & GFS2_QD_HASH_MASK;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
steven whitehouse | steven whitehouse | 60 | 100.00% | 1 | 100.00% |
| Total | 60 | 100.00% | 1 | 100.00% |
/* Take the bit-spinlock protecting one qd_hash_table bucket. */
static inline void spin_lock_bucket(unsigned int hash)
{
hlist_bl_lock(&qd_hash_table[hash]);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
steven whitehouse | steven whitehouse | 20 | 100.00% | 1 | 100.00% |
| Total | 20 | 100.00% | 1 | 100.00% |
/* Release the bit-spinlock taken by spin_lock_bucket(). */
static inline void spin_unlock_bucket(unsigned int hash)
{
hlist_bl_unlock(&qd_hash_table[hash]);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
steven whitehouse | steven whitehouse | 20 | 100.00% | 1 | 100.00% |
| Total | 20 | 100.00% | 1 | 100.00% |
/*
 * RCU callback: free a quota data object once a grace period guarantees
 * no hash-bucket walker (see gfs2_qd_search_bucket) can still see it.
 */
static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
kmem_cache_free(gfs2_quotad_cachep, qd);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
steven whitehouse | steven whitehouse | 33 | 100.00% | 1 | 100.00% |
| Total | 33 | 100.00% | 1 | 100.00% |
/*
 * gfs2_qd_dispose - tear down and free a list of isolated quota data objects
 * @list: objects linked on their qd_lru field (already off the global LRU)
 *
 * Each object is unlinked from its filesystem's sd_quota_list (under
 * qd_lock) and from the global hash table (under the bucket lock),
 * following the file-wide lock order qd_lock -> bucket lock.  The glock
 * reference is dropped and the object itself is freed via RCU so that
 * concurrent lockless bucket walkers stay safe.
 */
static void gfs2_qd_dispose(struct list_head *list)
{
struct gfs2_quota_data *qd;
struct gfs2_sbd *sdp;
while (!list_empty(list)) {
qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
sdp = qd->qd_gl->gl_name.ln_sbd;
list_del(&qd->qd_lru);
/* Free from the filesystem-specific list */
spin_lock(&qd_lock);
list_del(&qd->qd_list);
spin_unlock(&qd_lock);
spin_lock_bucket(qd->qd_hash);
hlist_bl_del_rcu(&qd->qd_hlist);
spin_unlock_bucket(qd->qd_hash);
/* A dead qd must have no pending change, slot or buffer refs. */
gfs2_assert_warn(sdp, !qd->qd_change);
gfs2_assert_warn(sdp, !qd->qd_slot_count);
gfs2_assert_warn(sdp, !qd->qd_bh_count);
gfs2_glock_put(qd->qd_gl);
atomic_dec(&sdp->sd_quota_count);
/* Delete it from the common reclaim list */
call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
abhijith das | abhijith das | 97 | 59.88% | 1 | 14.29% |
steven whitehouse | steven whitehouse | 60 | 37.04% | 3 | 42.86% |
robert s. peterson | robert s. peterson | 3 | 1.85% | 1 | 14.29% |
ying han | ying han | 1 | 0.62% | 1 | 14.29% |
dave chinner | dave chinner | 1 | 0.62% | 1 | 14.29% |
| Total | 162 | 100.00% | 7 | 100.00% |
/*
 * gfs2_qd_isolate - list_lru walk callback used by the quota shrinker
 * @item: the qd_lru list head of a candidate gfs2_quota_data
 * @lru: the per-node lru list the item currently sits on
 * @lru_lock: held by the walker for the duration of the callback
 * @arg: the dispose list_head passed in by gfs2_qd_shrink_scan
 *
 * Skips entries whose lockref is contended (LRU_SKIP); entries with a
 * zero reference count are marked dead and moved to the dispose list for
 * later freeing by gfs2_qd_dispose().
 */
static enum lru_status gfs2_qd_isolate(struct list_head *item,
struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
struct list_head *dispose = arg;
struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
if (!spin_trylock(&qd->qd_lockref.lock))
return LRU_SKIP;
if (qd->qd_lockref.count == 0) {
lockref_mark_dead(&qd->qd_lockref);
list_lru_isolate_move(lru, &qd->qd_lru, dispose);
}
spin_unlock(&qd->qd_lockref.lock);
return LRU_REMOVED;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
steven whitehouse | steven whitehouse | 84 | 77.78% | 1 | 33.33% |
abhijith das | abhijith das | 15 | 13.89% | 1 | 33.33% |
vladimir davydov | vladimir davydov | 9 | 8.33% | 1 | 33.33% |
| Total | 108 | 100.00% | 3 | 100.00% |
/*
 * Shrinker scan callback: isolate unreferenced quota data objects from
 * the LRU and free them.  Bails out with SHRINK_STOP for non-__GFP_FS
 * allocations to avoid recursing into filesystem reclaim.  Returns the
 * number of objects freed.
 */
static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
LIST_HEAD(dispose);
unsigned long freed;
if (!(sc->gfp_mask & __GFP_FS))
return SHRINK_STOP;
freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
gfs2_qd_isolate, &dispose);
gfs2_qd_dispose(&dispose);
return freed;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
steven whitehouse | steven whitehouse | 52 | 81.25% | 1 | 25.00% |
dave chinner | dave chinner | 6 | 9.38% | 1 | 25.00% |
abhijith das | abhijith das | 5 | 7.81% | 1 | 25.00% |
vladimir davydov | vladimir davydov | 1 | 1.56% | 1 | 25.00% |
| Total | 64 | 100.00% | 4 | 100.00% |
/*
 * Shrinker count callback: report (a pressure-scaled fraction of) the
 * number of quota data objects currently sitting on the LRU.
 */
static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
struct shrink_control *sc)
{
return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
dave chinner | dave chinner | 15 | 51.72% | 1 | 20.00% |
abhijith das | abhijith das | 6 | 20.69% | 1 | 20.00% |
steven whitehouse | steven whitehouse | 4 | 13.79% | 1 | 20.00% |
glauber costa | glauber costa | 3 | 10.34% | 1 | 20.00% |
vladimir davydov | vladimir davydov | 1 | 3.45% | 1 | 20.00% |
| Total | 29 | 100.00% | 5 | 100.00% |
/* Memory-pressure shrinker for cached quota data; NUMA aware so each
 * node's LRU partition is reclaimed independently. */
struct shrinker gfs2_qd_shrinker = {
.count_objects = gfs2_qd_shrink_count,
.scan_objects = gfs2_qd_shrink_scan,
.seeks = DEFAULT_SEEKS,
.flags = SHRINKER_NUMA_AWARE,
};
static u64 qd2index(struct gfs2_quota_data *qd)
{
struct kqid qid = qd->qd_id;
return (2 * (u64)from_kqid(&init_user_ns, qid)) +
((qid.type == USRQUOTA) ? 0 : 1);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
eric w. biederman | eric w. biederman | 29 | 59.18% | 2 | 40.00% |
david teigland | david teigland | 16 | 32.65% | 1 | 20.00% |
steven whitehouse | steven whitehouse | 2 | 4.08% | 1 | 20.00% |
robert s. peterson | robert s. peterson | 2 | 4.08% | 1 | 20.00% |
| Total | 49 | 100.00% | 5 | 100.00% |
/* Byte offset of this id's gfs2_quota record within the quota file. */
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	return qd2index(qd) * sizeof(struct gfs2_quota);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
eric w. biederman | eric w. biederman | 17 | 53.12% | 1 | 50.00% |
david teigland | david teigland | 15 | 46.88% | 1 | 50.00% |
| Total | 32 | 100.00% | 2 | 100.00% |
/*
 * qd_alloc - allocate and initialize a new quota data object
 * @hash: precomputed qd_hash_table bucket for (sdp, qid)
 * @sdp: the filesystem
 * @qid: the quota id this object will track
 *
 * The object starts with one reference and no slot (-1).  A per-id
 * quota glock is acquired for it.  Returns the new object or NULL on
 * allocation/glock failure.
 */
static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
struct gfs2_quota_data *qd;
int error;
qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
if (!qd)
return NULL;
qd->qd_sbd = sdp;
qd->qd_lockref.count = 1;
spin_lock_init(&qd->qd_lockref.lock);
qd->qd_id = qid;
qd->qd_slot = -1;
INIT_LIST_HEAD(&qd->qd_lru);
qd->qd_hash = hash;
error = gfs2_glock_get(sdp, qd2index(qd),
&gfs2_quota_glops, CREATE, &qd->qd_gl);
if (error)
goto fail;
return qd;
fail:
kmem_cache_free(gfs2_quotad_cachep, qd);
return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
david teigland | david teigland | 80 | 57.14% | 1 | 11.11% |
steven whitehouse | steven whitehouse | 41 | 29.29% | 4 | 44.44% |
abhijith das | abhijith das | 10 | 7.14% | 1 | 11.11% |
eric w. biederman | eric w. biederman | 8 | 5.71% | 2 | 22.22% |
josef bacik | josef bacik | 1 | 0.71% | 1 | 11.11% |
| Total | 140 | 100.00% | 9 | 100.00% |
/*
 * gfs2_qd_search_bucket - look up a quota data object in one hash bucket
 *
 * Caller must hold rcu_read_lock() (or the bucket lock) for the RCU list
 * walk.  On a match whose lockref is still live, a reference is taken
 * and the object is pulled off the reclaim LRU before being returned.
 * Returns NULL if no live match exists in this bucket.
 */
static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
const struct gfs2_sbd *sdp,
struct kqid qid)
{
struct gfs2_quota_data *qd;
struct hlist_bl_node *h;
hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
if (!qid_eq(qd->qd_id, qid))
continue;
if (qd->qd_sbd != sdp)
continue;
if (lockref_get_not_dead(&qd->qd_lockref)) {
list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
return qd;
}
}
return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
steven whitehouse | steven whitehouse | 93 | 97.89% | 1 | 50.00% |
david teigland | david teigland | 2 | 2.11% | 1 | 50.00% |
| Total | 95 | 100.00% | 2 | 100.00% |
/*
 * qd_get - find or create the quota data object for (sdp, qid)
 * @sdp: the filesystem
 * @qid: the quota id
 * @qdp: receives a referenced quota data object on success
 *
 * Fast path: lockless RCU lookup in the hash table.  Slow path:
 * allocate a candidate, then re-search under qd_lock + bucket lock; if
 * someone else raced us in, the freshly allocated candidate is
 * discarded.  Returns 0 or -ENOMEM.
 */
static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
struct gfs2_quota_data **qdp)
{
struct gfs2_quota_data *qd, *new_qd;
unsigned int hash = gfs2_qd_hash(sdp, qid);
rcu_read_lock();
*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
rcu_read_unlock();
if (qd)
return 0;
new_qd = qd_alloc(hash, sdp, qid);
if (!new_qd)
return -ENOMEM;
spin_lock(&qd_lock);
spin_lock_bucket(hash);
/* Recheck under the locks in case another task inserted it. */
*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
if (qd == NULL) {
*qdp = new_qd;
list_add(&new_qd->qd_list, &sdp->sd_quota_list);
hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
atomic_inc(&sdp->sd_quota_count);
}
spin_unlock_bucket(hash);
spin_unlock(&qd_lock);
/* Lost the race: drop the unused candidate and its glock. */
if (qd) {
gfs2_glock_put(new_qd->qd_gl);
kmem_cache_free(gfs2_quotad_cachep, new_qd);
}
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
david teigland | david teigland | 98 | 50.52% | 1 | 14.29% |
steven whitehouse | steven whitehouse | 86 | 44.33% | 4 | 57.14% |
abhijith das | abhijith das | 6 | 3.09% | 1 | 14.29% |
eric w. biederman | eric w. biederman | 4 | 2.06% | 1 | 14.29% |
| Total | 194 | 100.00% | 7 | 100.00% |
/*
 * Take an additional reference on an already-referenced quota data
 * object; asserts it has not been marked dead by the shrinker.
 */
static void qd_hold(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
lockref_get(&qd->qd_lockref);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
david teigland | david teigland | 34 | 73.91% | 1 | 25.00% |
steven whitehouse | steven whitehouse | 5 | 10.87% | 1 | 25.00% |
abhijith das | abhijith das | 4 | 8.70% | 1 | 25.00% |
robert s. peterson | robert s. peterson | 3 | 6.52% | 1 | 25.00% |
| Total | 46 | 100.00% | 4 | 100.00% |
/*
 * Drop a reference.  lockref_put_or_lock() either decrements a count
 * greater than one (and we are done) or returns with the lockref's
 * spinlock held and count == 1; in that case the last reference parks
 * the object on the reclaim LRU rather than freeing it immediately.
 */
static void qd_put(struct gfs2_quota_data *qd)
{
if (lockref_put_or_lock(&qd->qd_lockref))
return;
qd->qd_lockref.count = 0;
list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
spin_unlock(&qd->qd_lockref.lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
david teigland | david teigland | 25 | 49.02% | 1 | 25.00% |
steven whitehouse | steven whitehouse | 20 | 39.22% | 2 | 50.00% |
abhijith das | abhijith das | 6 | 11.76% | 1 | 25.00% |
| Total | 51 | 100.00% | 4 | 100.00% |
/*
 * slot_get - assign (or reuse) a slot in the per-node quota-change file
 * @qd: the quota data object
 *
 * The first hold allocates a free bit from sd_quota_bitmap and records
 * it in qd_slot; subsequent holds only bump qd_slot_count.  Returns 0 on
 * success or -ENOSPC if every slot is in use.
 *
 * The original code jumped with goto into the middle of the if-block;
 * the equivalent straight-line flow below keeps the same behavior
 * (qd_slot_count is only incremented when a slot exists or was just
 * allocated) without the confusing cross-scope jump.
 */
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	unsigned int bit;
	int error = 0;

	spin_lock(&sdp->sd_bitmap_lock);
	if (qd->qd_slot_count == 0) {
		bit = find_first_zero_bit(sdp->sd_quota_bitmap,
					  sdp->sd_quota_slots);
		if (bit >= sdp->sd_quota_slots) {
			error = -ENOSPC;
			goto out;
		}
		set_bit(bit, sdp->sd_quota_bitmap);
		qd->qd_slot = bit;
	}
	qd->qd_slot_count++;
out:
	spin_unlock(&sdp->sd_bitmap_lock);
	return error;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
david teigland | david teigland | 67 | 59.29% | 1 | 25.00% |
steven whitehouse | steven whitehouse | 42 | 37.17% | 2 | 50.00% |
abhijith das | abhijith das | 4 | 3.54% | 1 | 25.00% |
| Total | 113 | 100.00% | 4 | 100.00% |
/*
 * Take an extra hold on an already-allocated quota-change slot; asserts
 * the slot really is held (qd_slot_count != 0).
 */
static void slot_hold(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_sbd;
spin_lock(&sdp->sd_bitmap_lock);
gfs2_assert(sdp, qd->qd_slot_count);
qd->qd_slot_count++;
spin_unlock(&sdp->sd_bitmap_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
david teigland | david teigland | 43 | 86.00% | 1 | 33.33% |
steven whitehouse | steven whitehouse | 7 | 14.00% | 2 | 66.67% |
| Total | 50 | 100.00% | 3 | 100.00% |
/*
 * Drop one hold on the quota-change slot; the last hold returns the bit
 * to sd_quota_bitmap and resets qd_slot to -1 (no slot).
 */
static void slot_put(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_sbd;
spin_lock(&sdp->sd_bitmap_lock);
gfs2_assert(sdp, qd->qd_slot_count);
if (!--qd->qd_slot_count) {
BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
qd->qd_slot = -1;
}
spin_unlock(&sdp->sd_bitmap_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
david teigland | david teigland | 62 | 80.52% | 1 | 33.33% |
steven whitehouse | steven whitehouse | 15 | 19.48% | 2 | 66.67% |
| Total | 77 | 100.00% | 3 | 100.00% |
/*
 * bh_get - make qd->qd_bh point at the quota-change block for qd's slot
 *
 * Maps and reads the block of the per-node quota_change file that holds
 * this qd's slot, verifies its metatype, and caches a pointer to the
 * slot's gfs2_quota_change record.  Reference counted via qd_bh_count
 * under sd_quota_mutex, so only the first holder does the actual read.
 * Returns 0 or a negative errno.
 */
static int bh_get(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
unsigned int block, offset;
struct buffer_head *bh;
int error;
struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
mutex_lock(&sdp->sd_quota_mutex);
if (qd->qd_bh_count++) {
mutex_unlock(&sdp->sd_quota_mutex);
return 0;
}
/* Locate the slot's block and its record offset within that block. */
block = qd->qd_slot / sdp->sd_qc_per_block;
offset = qd->qd_slot % sdp->sd_qc_per_block;
bh_map.b_size = BIT(ip->i_inode.i_blkbits);
error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
if (error)
goto fail;
error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, 0, &bh);
if (error)
goto fail;
error = -EIO;
if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
goto fail_brelse;
qd->qd_bh = bh;
/* Cache a direct pointer to this qd's change record in the buffer. */
qd->qd_bh_qc = (struct gfs2_quota_change *)
(bh->b_data + sizeof(struct gfs2_meta_header) +
offset * sizeof(struct gfs2_quota_change));
mutex_unlock(&sdp->sd_quota_mutex);
return 0;
fail_brelse:
brelse(bh);
fail:
qd->qd_bh_count--;
mutex_unlock(&sdp->sd_quota_mutex);
return error;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
david teigland | david teigland | 218 | 81.34% | 1 | 9.09% |
steven whitehouse | steven whitehouse | 39 | 14.55% | 5 | 45.45% |
robert s. peterson | robert s. peterson | 5 | 1.87% | 2 | 18.18% |
fabian frederick | fabian frederick | 3 | 1.12% | 1 | 9.09% |
andreas gruenbacher | andreas gruenbacher | 2 | 0.75% | 1 | 9.09% |
josef whiter | josef whiter | 1 | 0.37% | 1 | 9.09% |
| Total | 268 | 100.00% | 11 | 100.00% |
/*
 * Drop one hold on the cached quota-change buffer; the last hold
 * releases the buffer_head and clears the cached record pointer.
 */
static void bh_put(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
mutex_lock(&sdp->sd_quota_mutex);
gfs2_assert(sdp, qd->qd_bh_count);
if (!--qd->qd_bh_count) {
brelse(qd->qd_bh);
qd->qd_bh = NULL;
qd->qd_bh_qc = NULL;
}
mutex_unlock(&sdp->sd_quota_mutex);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
david teigland | david teigland | 73 | 93.59% | 1 | 33.33% |
robert s. peterson | robert s. peterson | 3 | 3.85% | 1 | 33.33% |
steven whitehouse | steven whitehouse | 2 | 2.56% | 1 | 33.33% |
| Total | 78 | 100.00% | 3 | 100.00% |
/*
 * qd_check_sync - decide whether a quota data object needs syncing
 * @sdp: the filesystem
 * @qd: the candidate quota data object
 * @sync_gen: generation to compare against, or NULL to skip that check
 *
 * Called with qd_lock held.  Returns 0 if the object is already being
 * synced, has no pending change, or is up to date with *sync_gen.
 * Otherwise takes a reference, marks it QDF_LOCKED, snapshots the
 * pending change into qd_change_sync, pins its slot, moves it to the
 * tail of sd_quota_list, and returns 1.
 */
static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
u64 *sync_gen)
{
if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
!test_bit(QDF_CHANGE, &qd->qd_flags) ||
(sync_gen && (qd->qd_sync_gen >= *sync_gen)))
return 0;
if (!lockref_get_not_dead(&qd->qd_lockref))
return 0;
list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
set_bit(QDF_LOCKED, &qd->qd_flags);
qd->qd_change_sync = qd->qd_change;
slot_hold(qd);
return 1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
david teigland | david teigland | 77 | 68.75% | 1 | 25.00% |
steven whitehouse | steven whitehouse | 35 | 31.25% | 3 | 75.00% |
| Total | 112 | 100.00% | 4 | 100.00% |
/*
 * qd_fish - find the next quota data object that needs syncing
 * @sdp: the filesystem
 * @qdp: receives a referenced, QDF_LOCKED object, or NULL if none
 *
 * Scans sd_quota_list under qd_lock using qd_check_sync() against the
 * filesystem's current sync generation.  On a hit, the quota-change
 * buffer is attached via bh_get(); if that fails, all references taken
 * by qd_check_sync() are unwound.  No-op on read-only mounts.
 * Returns 0 or the bh_get() error.
 */
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
struct gfs2_quota_data *qd = NULL;
int error;
int found = 0;
*qdp = NULL;
if (sdp->sd_vfs->s_flags & MS_RDONLY)
return 0;
spin_lock(&qd_lock);
list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
if (found)
break;
}
if (!found)
qd = NULL;
spin_unlock(&qd_lock);
if (qd) {
gfs2_assert_warn(sdp, qd->qd_change_sync);
error = bh_get(qd);
if (error) {
/* Undo everything qd_check_sync() set up. */
clear_bit(QDF_LOCKED, &qd->qd_flags);
slot_put(qd);
qd_put(qd);
return error;
}
}
*qdp = qd;
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
steven whitehouse | steven whitehouse | 80 | 50.31% | 2 | 50.00% |
david teigland | david teigland | 77 | 48.43% | 1 | 25.00% |
abhijith das | abhijith das | 2 | 1.26% | 1 | 25.00% |
| Total | 159 | 100.00% | 4 | 100.00% |
/*
 * Finish a sync attempt on a quota data object: clear QDF_LOCKED and
 * drop the buffer, slot and object references taken when the sync was
 * set up (see qd_check_sync/qd_fish).
 */
static void qd_unlock(struct gfs2_quota_data *qd)
{
gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
test_bit(QDF_LOCKED, &qd->qd_flags));
clear_bit(QDF_LOCKED, &qd->qd_flags);
bh_put(qd);
slot_put(qd);
qd_put(qd);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
david teigland | david teigland | 54 | 94.74% | 1 | 50.00% |
robert s. peterson | robert s. peterson | 3 | 5.26% | 1 | 50.00% |
| Total | 57 | 100.00% | 2 | 100.00% |
/*
 * qdsb_get - fully acquire a quota data object for use by an inode
 * @sdp: the filesystem
 * @qid: the quota id
 * @qdp: receives the referenced object on success
 *
 * Takes, in order: an object reference (qd_get), a quota-change file
 * slot (slot_get), and the change buffer (bh_get), unwinding in reverse
 * order on failure.  Paired with qdsb_put().  Returns 0 or an errno.
 */
static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
struct gfs2_quota_data **qdp)
{
int error;
error = qd_get(sdp, qid, qdp);
if (error)
return error;
error = slot_get(*qdp);
if (error)
goto fail;
error = bh_get(*qdp);
if (error)
goto fail_slot;
return 0;
fail_slot:
slot_put(*qdp);
fail:
qd_put(*qdp);
return error;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
david teigland | david teigland | 90 | 95.74% | 1 | 50.00% |
eric w. biederman | eric w. biederman | 4 | 4.26% | 1 | 50.00% |
| Total | 94 | 100.00% | 2 | 100.00% |
/* Release the buffer, slot and object references taken by qdsb_get(). */
static void qdsb_put(struct gfs2_quota_data *qd)
{
bh_put(qd);
slot_put(qd);
qd_put(qd);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
david teigland | david teigland | 26 | 100.00% | 1 | 100.00% |
| Total | 26 | 100.00% | 1 | 100.00% |
/**
 * gfs2_qa_alloc - ensure the inode has a quota allocations structure
 * @ip: the inode for this reservation
 *
 * No-op when quotas are off for this filesystem.  Otherwise allocates
 * ip->i_qadata under the inode's i_rw_mutex unless it already exists.
 * Returns 0 or -ENOMEM.
 */
int gfs2_qa_alloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	int ret = 0;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	down_write(&ip->i_rw_mutex);
	if (!ip->i_qadata) {
		ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
		if (!ip->i_qadata)
			ret = -ENOMEM;
	}
	up_write(&ip->i_rw_mutex);
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
robert s. peterson | robert s. peterson | 93 | 100.00% | 1 | 100.00% |
| Total | 93 | 100.00% | 1 | 100.00% |
/*
 * gfs2_qa_delete - free the inode's quota allocations structure
 * @ip: the inode
 * @wcount: optional write count; when given, the structure is only
 *          freed if at most one writer remains (<= 1)
 */
void gfs2_qa_delete(struct gfs2_inode *ip, atomic_t *wcount)
{
down_write(&ip->i_rw_mutex);
if (ip->i_qadata && ((wcount == NULL) || (atomic_read(wcount) <= 1))) {
kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
ip->i_qadata = NULL;
}
up_write(&ip->i_rw_mutex);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
robert s. peterson | robert s. peterson | 70 | 100.00% | 2 | 100.00% |
| Total | 70 | 100.00% | 2 | 100.00% |
int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_quota_data **qd;
int error;
if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return 0;
if (ip->i_qadata == NULL) {
error = gfs2_rsqa_alloc(ip);
if (error)
return error;
}
qd = ip->i_qadata->qa_qd;
if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
return -EIO;
error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
if (error)
goto out;
ip->i_qadata->qa_qd_num++;
qd++;
error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
if (error)
goto out;
ip->i_qadata->qa_qd_num++;
qd++;
if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
!uid_eq(uid, ip->i_inode.i_uid)) {
error = qdsb_get(sdp, make_kqid_uid(uid), qd);
if (error)
goto out;
ip->i_qadata->qa_qd_num++;
qd++;
}
if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
!gid_eq(gid, ip->i_inode.i_gid)) {
error = qdsb_get(sdp, make_kqid_gid(gid), qd);
if (error)
goto out;
ip->i_qadata->qa_qd_num++;
qd++;
}
out:
if (error)
gfs2_quota_unhold(ip);
return error;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
david teigland | david teigland | 200 | 64.31% | 1 | 10.00% |
robert s. peterson | robert s. peterson | 51 | 16.40% | 2 | 20.00% |
eric w. biederman | eric w. biederman | 36 | 11.58% | 4 | 40.00% |
steven whitehouse | steven whitehouse | 13 | 4.18% | 2 | 20.00% |
andrew price | andrew price | 11 | 3.54% | 1 | 10.00% |
| Total | 311 | 100.00% | 10 | 100.00% |
/*
 * gfs2_quota_unhold - release every quota data element held on the inode
 * @ip: the inode
 *
 * Drops all elements acquired by gfs2_quota_hold(), clearing each array
 * slot and resetting the count.  Must not be called while GIF_QD_LOCKED
 * is set (asserted).
 */
void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u32 i, nr;

	if (ip->i_qadata == NULL)
		return;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	nr = ip->i_qadata->qa_qd_num;
	for (i = 0; i < nr; i++) {
		qdsb_put(ip->i_qadata->qa_qd[i]);
		ip->i_qadata->qa_qd[i] = NULL;
	}
	ip->i_qadata->qa_qd_num = 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
david teigland | david teigland | 71 | 70.30% | 1 | 20.00% |
robert s. peterson | robert s. peterson | 25 | 24.75% | 3 | 60.00% |
steven whitehouse | steven whitehouse | 5 | 4.95% | 1 | 20.00% |
| Total | 101 | 100.00% | 5 | 100.00% |
static int sort_qd(const void *a, const void *b)
{
const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
if (qid_lt(qd_a->qd_id, qd_b->qd_id))
return -1;
if (qid_lt(qd_b->qd_id, qd_a->qd_id))