Release 4.7 drivers/staging/lustre/lnet/libcfs/hash.c
  
  
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/hash.c
 *
 * Implements a generic hash table class for the Lustre system.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 * - Simplified API and improved documentation
 * - Added per-hash feature flags:
 *   * CFS_HASH_DEBUG additional validation
 *   * CFS_HASH_REHASH dynamic rehashing
 * - Added per-hash statistics
 * - General performance enhancements
 *
 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
 * - move all stuff to libcfs
 * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
 * - ignore hs_rwlock if without CFS_HASH_REHASH setting
 * - buckets are allocated one by one (instead of as contiguous memory)
 *   to avoid unnecessary cacheline conflicts
 *
 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
 * - "bucket" is a group of hlist_head now, user can specify bucket size
 *   by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
 *   one lock for reducing memory overhead.
 *
 * - support lockless hash, caller will take care of locks:
 *   avoid lock overhead for hash tables that are already protected
 *   by locking in the caller for another reason
 *
 * - support both spin_lock/rwlock for bucket:
 *   overhead of spinlock contention is lower than read/write
 *   contention of rwlock, so using spinlock to serialize operations on
 *   bucket is more reasonable for those frequently changed hash tables
 *
 * - support one-single lock mode:
 *   one lock to protect all hash operations to avoid overhead of
 *   multiple locks if hash table is always small
 *
 * - removed a lot of unnecessary addref & decref on hash elements:
 *   addref & decref are atomic operations in many use-cases, and
 *   atomic operations are expensive.
 *
 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
 *   some Lustre use-cases require these functions to be strictly
 *   non-blocking, so any required rehash is scheduled on a different
 *   thread in those cases.
 *
 * - safer rehash on large hash tables:
 *   In the old implementation, the rehash function would exclusively
 *   lock the hash table and finish the rehash in one batch; that is
 *   dangerous on an SMP system because rehashing millions of elements
 *   could take a long time. The new rehash can release the lock and
 *   relax the CPU in the middle of a rehash, so it is safe for another
 *   thread to search/change the hash table even while it is rehashing.
 *
 * - support two different refcount modes
 *   . hash table has refcount on element
 *   . hash table doesn't change refcount on adding/removing element
 *
 * - support long name hash table (for param-tree)
 *
 * - fix a bug in cfs_hash_rehash_key:
 *   in the old implementation, cfs_hash_rehash_key could corrupt the
 *   hash table because @key was overwritten without any protection.
 *   Now the user must define hs_keycpy for rehash-enabled hash
 *   tables; cfs_hash_rehash_key overwrites the hash key under the
 *   lock by calling hs_keycpy.
 *
 * - better hash iteration:
 *   Now we support both locked and lockless iteration of the hash
 *   table. Also, the user can break the iteration by returning 1 from
 *   the callback.
 */
#include <linux/seq_file.h>
#include <linux/log2.h>
#include "../../include/linux/libcfs/libcfs.h"
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static unsigned int warn_on_depth = 8;
module_param(warn_on_depth, uint, 0644);
MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
#endif
struct cfs_wi_sched *cfs_sched_rehash;
static inline void
cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
static inline void
cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}
static inline void
cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
	__acquires(&lock->spinContributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| peng tao | peng tao | 11 | 68.75% | 1 | 33.33% | 
| loic pefferkorn | loic pefferkorn | 3 | 18.75% | 1 | 33.33% | 
| lisa nguyen | lisa nguyen | 2 | 12.50% | 1 | 33.33% | 
 | Total | 16 | 100.00% | 3 | 100.00% | 
)
{
	spin_lock(&lock->spin);
}
static inline void
cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
	__releases(&lock->spinContributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| peng tao | peng tao | 11 | 68.75% | 1 | 33.33% | 
| loic pefferkorn | loic pefferkorn | 3 | 18.75% | 1 | 33.33% | 
| lisa nguyen | lisa nguyen | 2 | 12.50% | 1 | 33.33% | 
 | Total | 16 | 100.00% | 3 | 100.00% | 
)
{
	spin_unlock(&lock->spin);
}
static inline void
cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
	__acquires(&lock->rwContributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| peng tao | peng tao | 11 | 68.75% | 1 | 33.33% | 
| loic pefferkorn | loic pefferkorn | 3 | 18.75% | 1 | 33.33% | 
| lisa nguyen | lisa nguyen | 2 | 12.50% | 1 | 33.33% | 
 | Total | 16 | 100.00% | 3 | 100.00% | 
)
{
	if (!exclusive)
		read_lock(&lock->rw);
	else
		write_lock(&lock->rw);
}
static inline void
cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
	__releases(&lock->rwContributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| peng tao | peng tao | 11 | 68.75% | 1 | 33.33% | 
| loic pefferkorn | loic pefferkorn | 3 | 18.75% | 1 | 33.33% | 
| lisa nguyen | lisa nguyen | 2 | 12.50% | 1 | 33.33% | 
 | Total | 16 | 100.00% | 3 | 100.00% | 
)
{
	if (!exclusive)
		read_unlock(&lock->rw);
	else
		write_unlock(&lock->rw);
}
/** No lock hash */
static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
	.hs_lock	= cfs_hash_nl_lock,
	.hs_unlock	= cfs_hash_nl_unlock,
	.hs_bkt_lock	= cfs_hash_nl_lock,
	.hs_bkt_unlock	= cfs_hash_nl_unlock,
};
/** no bucket lock, one spinlock to protect everything */
static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
	.hs_lock	= cfs_hash_spin_lock,
	.hs_unlock	= cfs_hash_spin_unlock,
	.hs_bkt_lock	= cfs_hash_nl_lock,
	.hs_bkt_unlock	= cfs_hash_nl_unlock,
};
/** spin bucket lock, rehash is enabled */
static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
	.hs_lock	= cfs_hash_rw_lock,
	.hs_unlock	= cfs_hash_rw_unlock,
	.hs_bkt_lock	= cfs_hash_spin_lock,
	.hs_bkt_unlock	= cfs_hash_spin_unlock,
};
/** rw bucket lock, rehash is enabled */
static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
	.hs_lock	= cfs_hash_rw_lock,
	.hs_unlock	= cfs_hash_rw_unlock,
	.hs_bkt_lock	= cfs_hash_rw_lock,
	.hs_bkt_unlock	= cfs_hash_rw_unlock,
};
/** spin bucket lock, rehash is disabled */
static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
	.hs_lock	= cfs_hash_nl_lock,
	.hs_unlock	= cfs_hash_nl_unlock,
	.hs_bkt_lock	= cfs_hash_spin_lock,
	.hs_bkt_unlock	= cfs_hash_spin_unlock,
};
/** rw bucket lock, rehash is disabled */
static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
	.hs_lock	= cfs_hash_nl_lock,
	.hs_unlock	= cfs_hash_nl_unlock,
	.hs_bkt_lock	= cfs_hash_rw_lock,
	.hs_bkt_unlock	= cfs_hash_rw_unlock,
};
static void
cfs_hash_lock_setup(struct cfs_hash *hs)
{
	if (cfs_hash_with_no_lock(hs)) {
		hs->hs_lops = &cfs_hash_nl_lops;
	} else if (cfs_hash_with_no_bktlock(hs)) {
		hs->hs_lops = &cfs_hash_nbl_lops;
		spin_lock_init(&hs->hs_lock.spin);
	} else if (cfs_hash_with_rehash(hs)) {
		rwlock_init(&hs->hs_lock.rw);
		if (cfs_hash_with_rw_bktlock(hs))
			hs->hs_lops = &cfs_hash_bkt_rw_lops;
		else if (cfs_hash_with_spin_bktlock(hs))
			hs->hs_lops = &cfs_hash_bkt_spin_lops;
		else
			LBUG();
	} else {
		if (cfs_hash_with_rw_bktlock(hs))
			hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
		else if (cfs_hash_with_spin_bktlock(hs))
			hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
		else
			LBUG();
	}
}
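/*
 * Illustrative sketch (added by the editor, not part of the original
 * source): the decision table that cfs_hash_lock_setup() implements,
 * written out as a hypothetical helper.  The cfs_hash_with_*()
 * predicates are the real ones used above; only this function itself
 * is invented, for documentation purposes.
 */
static inline const char *
cfs_hash_lock_mode_name(struct cfs_hash *hs)
{
	if (cfs_hash_with_no_lock(hs))
		return "no locking (caller serializes everything)";
	if (cfs_hash_with_no_bktlock(hs))
		return "one table spinlock, no bucket locks";
	if (cfs_hash_with_rehash(hs))
		return cfs_hash_with_rw_bktlock(hs) ?
		       "table rwlock + per-bucket rwlock" :
		       "table rwlock + per-bucket spinlock";
	return cfs_hash_with_rw_bktlock(hs) ?
	       "per-bucket rwlock only" : "per-bucket spinlock only";
}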
/**
 * Simple hash head without depth tracking
 * new element is always added to head of hlist
 */
struct cfs_hash_head {
	struct hlist_head	hh_head;	/**< entries list */
};
static int
cfs_hash_hh_hhead_size(struct cfs_hash *hs)
{
	return sizeof(struct cfs_hash_head);
}
static struct hlist_head *
cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	struct cfs_hash_head *head;
	head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
	return &head[bd->bd_offset].hh_head;
}
static int
cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
	return -1; /* unknown depth */
}
static int
cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	hlist_del_init(hnode);
	return -1; /* unknown depth */
}
/**
 * Simple hash head with depth tracking
 * new element is always added to head of hlist
 */
struct cfs_hash_head_dep {
	struct hlist_head	hd_head;	/**< entries list */
	unsigned int		hd_depth;	/**< list length */
};
static int
cfs_hash_hd_hhead_size(struct cfs_hash *hs)
{
	return sizeof(struct cfs_hash_head_dep);
}
static struct hlist_head *
cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	struct cfs_hash_head_dep   *head;
	head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
	return &head[bd->bd_offset].hd_head;
}
static int
cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	struct cfs_hash_head_dep *hh;
	hh = container_of(cfs_hash_hd_hhead(hs, bd),
			  struct cfs_hash_head_dep, hd_head);
	hlist_add_head(hnode, &hh->hd_head);
	return ++hh->hd_depth;
}
static int
cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	struct cfs_hash_head_dep *hh;
	hh = container_of(cfs_hash_hd_hhead(hs, bd),
			  struct cfs_hash_head_dep, hd_head);
	hlist_del_init(hnode);
	return --hh->hd_depth;
}
/**
 * double links hash head without depth tracking
 * new element is always added to tail of hlist
 */
struct cfs_hash_dhead {
	struct hlist_head	dh_head;	/**< entries list */
	struct hlist_node	*dh_tail;	/**< the last entry */
};
static int
cfs_hash_dh_hhead_size(struct cfs_hash *hs)
{
	return sizeof(struct cfs_hash_dhead);
}
static struct hlist_head *
cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	struct cfs_hash_dhead *head;
	head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
	return &head[bd->bd_offset].dh_head;
}
static int
cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	struct cfs_hash_dhead *dh;
	dh = container_of(cfs_hash_dh_hhead(hs, bd),
			  struct cfs_hash_dhead, dh_head);
	if (dh->dh_tail) /* not empty */
		hlist_add_behind(hnode, dh->dh_tail);
	else /* empty list */
		hlist_add_head(hnode, &dh->dh_head);
	dh->dh_tail = hnode;
	return -1; /* unknown depth */
}
static int
cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnd)
{
	struct cfs_hash_dhead *dh;
	dh = container_of(cfs_hash_dh_hhead(hs, bd),
			  struct cfs_hash_dhead, dh_head);
	if (!hnd->next) { /* it's the tail */
		dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
			      container_of(hnd->pprev, struct hlist_node, next);
	}
	hlist_del_init(hnd);
	return -1; /* unknown depth */
}
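/*
 * Editor's note (added for clarity): when the tail node is removed,
 * the new tail is recovered from hnd->pprev.  pprev points at the
 * previous node's ->next field, or at the list head's ->first field
 * when the deleted node was the only element; in the first case
 * container_of(pprev, struct hlist_node, next) yields the new last
 * node, in the second case dh_tail is reset to NULL.
 */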
/**
 * double links hash head with depth tracking
 * new element is always added to tail of hlist
 */
struct cfs_hash_dhead_dep {
	struct hlist_head	dd_head;	/**< entries list */
	struct hlist_node	*dd_tail;	/**< the last entry */
	unsigned int		dd_depth;	/**< list length */
};
static int
cfs_hash_dd_hhead_size(struct cfs_hash *hs)
{
	return sizeof(struct cfs_hash_dhead_dep);
}
static struct hlist_head *
cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	struct cfs_hash_dhead_dep *head;
	head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
	return &head[bd->bd_offset].dd_head;
}
static int
cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	struct cfs_hash_dhead_dep *dh;
	dh = container_of(cfs_hash_dd_hhead(hs, bd),
			  struct cfs_hash_dhead_dep, dd_head);
	if (dh->dd_tail) /* not empty */
		hlist_add_behind(hnode, dh->dd_tail);
	else /* empty list */
		hlist_add_head(hnode, &dh->dd_head);
	dh->dd_tail = hnode;
	return ++dh->dd_depth;
}
static int
cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnd)
{
	struct cfs_hash_dhead_dep *dh;
	dh = container_of(cfs_hash_dd_hhead(hs, bd),
			  struct cfs_hash_dhead_dep, dd_head);
	if (!hnd->next) { /* it's the tail */
		dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
			      container_of(hnd->pprev, struct hlist_node, next);
	}
	hlist_del_init(hnd);
	return --dh->dd_depth;
}
static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
	.hop_hhead	= cfs_hash_hh_hhead,
	.hop_hhead_size	= cfs_hash_hh_hhead_size,
	.hop_hnode_add	= cfs_hash_hh_hnode_add,
	.hop_hnode_del	= cfs_hash_hh_hnode_del,
};
static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
	.hop_hhead	= cfs_hash_hd_hhead,
	.hop_hhead_size	= cfs_hash_hd_hhead_size,
	.hop_hnode_add	= cfs_hash_hd_hnode_add,
	.hop_hnode_del	= cfs_hash_hd_hnode_del,
};
static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
	.hop_hhead	= cfs_hash_dh_hhead,
	.hop_hhead_size	= cfs_hash_dh_hhead_size,
	.hop_hnode_add	= cfs_hash_dh_hnode_add,
	.hop_hnode_del	= cfs_hash_dh_hnode_del,
};
static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
	.hop_hhead	= cfs_hash_dd_hhead,
	.hop_hhead_size	= cfs_hash_dd_hhead_size,
	.hop_hnode_add	= cfs_hash_dd_hnode_add,
	.hop_hnode_del	= cfs_hash_dd_hnode_del,
};
static void
cfs_hash_hlist_setup(struct cfs_hash *hs)
{
	if (cfs_hash_with_add_tail(hs)) {
		hs->hs_hops = cfs_hash_with_depth(hs) ?
			      &cfs_hash_dd_hops : &cfs_hash_dh_hops;
	} else {
		hs->hs_hops = cfs_hash_with_depth(hs) ?
			      &cfs_hash_hd_hops : &cfs_hash_hh_hops;
	}
}
static void
cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
		     unsigned int bits, const void *key, struct cfs_hash_bd *bd)
{
	unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
	LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);
	bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
	bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
}
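/*
 * Worked example (added for clarity, not in the original source):
 * with bits = 10 and hs_bkt_bits = 3 there are 2^10 hlist heads
 * spread over 2^(10 - 3) = 128 buckets of 8 heads each.  For a hash
 * index of 0x2b5:
 *
 *	bd_bucket = bkts[0x2b5 & 0x7f] = bkts[0x35]  (low 7 bits)
 *	bd_offset = 0x2b5 >> 7         = 5           (high 3 bits)
 *
 * The low bits select the bucket and the high bits select the hlist
 * head inside it, so consecutive hash indices land in different
 * buckets and spread lock contention.
 */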
void
cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
{
	/* NB: caller should hold hs->hs_lock.rw if REHASH is set */
	if (likely(!hs->hs_rehash_buckets)) {
		cfs_hash_bd_from_key(hs, hs->hs_buckets,
				     hs->hs_cur_bits, key, bd);
	} else {
		LASSERT(hs->hs_rehash_bits != 0);
		cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
				     hs->hs_rehash_bits, key, bd);
	}
}
EXPORT_SYMBOL(cfs_hash_bd_get);
static inline void
cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
{
	if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
		return;
	bd->bd_bucket->hsb_depmax = dep_cur;
# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
	if (likely(warn_on_depth == 0 ||
		   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
		return;
	spin_lock(&hs->hs_dep_lock);
	hs->hs_dep_max  = dep_cur;
	hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
	hs->hs_dep_off  = bd->bd_offset;
	hs->hs_dep_bits = hs->hs_cur_bits;
	spin_unlock(&hs->hs_dep_lock);
	cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
# endif
}
void
cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		       struct hlist_node *hnode)
{
	int rc;
	rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
	cfs_hash_bd_dep_record(hs, bd, rc);
	bd->bd_bucket->hsb_version++;
	if (unlikely(bd->bd_bucket->hsb_version == 0))
		bd->bd_bucket->hsb_version++;
	bd->bd_bucket->hsb_count++;
	if (cfs_hash_with_counter(hs))
		atomic_inc(&hs->hs_count);
	if (!cfs_hash_with_no_itemref(hs))
		cfs_hash_get(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_add_locked);
void
cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		       struct hlist_node *hnode)
{
	hs->hs_hops->hop_hnode_del(hs, bd, hnode);
	LASSERT(bd->bd_bucket->hsb_count > 0);
	bd->bd_bucket->hsb_count--;
	bd->bd_bucket->hsb_version++;
	if (unlikely(bd->bd_bucket->hsb_version == 0))
		bd->bd_bucket->hsb_version++;
	if (cfs_hash_with_counter(hs)) {
		LASSERT(atomic_read(&hs->hs_count) > 0);
		atomic_dec(&hs->hs_count);
	}
	if (!cfs_hash_with_no_itemref(hs))
		cfs_hash_put_locked(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_del_locked);
void
cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
			struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
{
	struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
	struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
	int rc;
	if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
		return;
	/* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
	 * in cfs_hash_bd_del/add_locked
	 */
	hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
	rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
	cfs_hash_bd_dep_record(hs, bd_new, rc);
	LASSERT(obkt->hsb_count > 0);
	obkt->hsb_count--;
	obkt->hsb_version++;
	if (unlikely(obkt->hsb_version == 0))
		obkt->hsb_version++;
	nbkt->hsb_count++;
	nbkt->hsb_version++;
	if (unlikely(nbkt->hsb_version == 0))
		nbkt->hsb_version++;
}
enum {
	/** always set, for sanity (avoid ZERO intent) */
	CFS_HS_LOOKUP_MASK_FIND	= BIT(0),
	/** return entry with a ref */
	CFS_HS_LOOKUP_MASK_REF	= BIT(1),
	/** add entry if not existing */
	CFS_HS_LOOKUP_MASK_ADD	= BIT(2),
	/** delete entry, ignore other masks */
	CFS_HS_LOOKUP_MASK_DEL	= BIT(3),
};
enum cfs_hash_lookup_intent {
	/** return item w/o refcount */
	CFS_HS_LOOKUP_IT_PEEK	 = CFS_HS_LOOKUP_MASK_FIND,
	/** return item with refcount */
	CFS_HS_LOOKUP_IT_FIND	 = (CFS_HS_LOOKUP_MASK_FIND |
				    CFS_HS_LOOKUP_MASK_REF),
	/** return item w/o refcount if existed, otherwise add */
	CFS_HS_LOOKUP_IT_ADD	 = (CFS_HS_LOOKUP_MASK_FIND |
				    CFS_HS_LOOKUP_MASK_ADD),
	/** return item with refcount if existed, otherwise add */
	CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND |
				    CFS_HS_LOOKUP_MASK_ADD),
	/** delete if existed */
	CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND |
				    CFS_HS_LOOKUP_MASK_DEL)
};
static struct hlist_node *
cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			  const void *key, struct hlist_node *hnode,
			  enum cfs_hash_lookup_intent intent)
{
	struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
	struct hlist_node *ehnode;
	struct hlist_node *match;
	int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
	/* with this function, we can avoid a lot of useless refcount ops,
	 * which are expensive atomic operations most of the time.
	 */
	match = intent_add ? NULL : hnode;
	hlist_for_each(ehnode, hhead) {
		if (!cfs_hash_keycmp(hs, key, ehnode))
			continue;
		if (match && match != ehnode) /* can't match */
			continue;
		/* match and ... */
		if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
			cfs_hash_bd_del_locked(hs, bd, ehnode);
			return ehnode;
		}
		/* caller wants refcount? */
		if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
			cfs_hash_get(hs, ehnode);
		return ehnode;
	}
	/* no match item */
	if (!intent_add)
		return NULL;
	LASSERT(hnode);
	cfs_hash_bd_add_locked(hs, bd, hnode);
	return hnode;
}
struct hlist_node *
cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			  const void *key)
{
	return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
					 CFS_HS_LOOKUP_IT_FIND);
}
EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
struct hlist_node *
cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			const void *key)
{
	return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
					 CFS_HS_LOOKUP_IT_PEEK);
}
EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
static void
cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
		       unsigned n, int excl)
{
	struct cfs_hash_bucket *prev = NULL;
	int i;
	/*
	 * bds must be ordered by ascending bd->bd_bucket->hsb_index.
	 * NB: it's possible that several bds point to the same bucket but
	 * have different bd::bd_offset, so we must take care to avoid
	 * deadlock.
	 */
	cfs_hash_for_each_bd(bds, n, i) {
		if (prev == bds[i].bd_bucket)
			continue;
		LASSERT(!prev || prev->hsb_index < bds[i].bd_bucket->hsb_index);
		cfs_hash_bd_lock(hs, &bds[i], excl);
		prev = bds[i].bd_bucket;
	}
}
static void
cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
			 unsigned n, int excl)
{
	struct cfs_hash_bucket *prev = NULL;
	int i;
	cfs_hash_for_each_bd(bds, n, i) {
		if (prev != bds[i].bd_bucket) {
			cfs_hash_bd_unlock(hs, &bds[i], excl);
			prev = bds[i].bd_bucket;
		}
	}
}
static struct hlist_node *
cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				unsigned n, const void *key)
{
	struct hlist_node *ehnode;
	unsigned i;
	cfs_hash_for_each_bd(bds, n, i) {
		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
						   CFS_HS_LOOKUP_IT_FIND);
		if (ehnode)
			return ehnode;
	}
	return NULL;
}
static struct hlist_node *
cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				 unsigned n, const void *key,
				 struct hlist_node *hnode, int noref)
{
	struct hlist_node *ehnode;
	int intent;
	unsigned i;
	LASSERT(hnode);
	intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK;
	cfs_hash_for_each_bd(bds, n, i) {
		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
						   NULL, intent);
		if (ehnode)
			return ehnode;
	}
	if (i == 1) { /* only one bucket */
		cfs_hash_bd_add_locked(hs, &bds[0], hnode);
	} else {
		struct cfs_hash_bd mybd;
		cfs_hash_bd_get(hs, key, &mybd);
		cfs_hash_bd_add_locked(hs, &mybd, hnode);
	}
	return hnode;
}
static struct hlist_node *
cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				 unsigned n, const void *key,
				 struct hlist_node *hnode)
{
	struct hlist_node *ehnode;
	unsigned int i;
	cfs_hash_for_each_bd(bds, n, i) {
		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
						   CFS_HS_LOOKUP_IT_FINDDEL);
		if (ehnode)
			return ehnode;
	}
	return NULL;
}
static void
cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
{
	int rc;
	if (!bd2->bd_bucket)
		return;
	if (!bd1->bd_bucket) {
		*bd1 = *bd2;
		bd2->bd_bucket = NULL;
		return;
	}
	rc = cfs_hash_bd_compare(bd1, bd2);
	if (!rc)
		bd2->bd_bucket = NULL;
	else if (rc > 0)
		swap(*bd1, *bd2); /* swap bd1 and bd2 */
}
void
cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
		     struct cfs_hash_bd *bds)
{
	/* NB: caller should hold hs_lock.rw if REHASH is set */
	cfs_hash_bd_from_key(hs, hs->hs_buckets,
			     hs->hs_cur_bits, key, &bds[0]);
	if (likely(!hs->hs_rehash_buckets)) {
		/* no rehash or not rehashing */
		bds[1].bd_bucket = NULL;
		return;
	}
	LASSERT(hs->hs_rehash_bits != 0);
	cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
			     hs->hs_rehash_bits, key, &bds[1]);
	cfs_hash_bd_order(&bds[0], &bds[1]);
}
void
cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
	cfs_hash_multi_bd_lock(hs, bds, 2, excl);
}
void
cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
	cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
}
struct hlist_node *
cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
			       const void *key)
{
	return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
}
struct hlist_node *
cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				const void *key, struct hlist_node *hnode,
				int noref)
{
	return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
						hnode, noref);
}
struct hlist_node *
cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				const void *key, struct hlist_node *hnode)
{
	return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
}
static void
cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
		      int bkt_size, int prev_size, int size)
{
	int i;
	for (i = prev_size; i < size; i++) {
		if (buckets[i])
			LIBCFS_FREE(buckets[i], bkt_size);
	}
	LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
}
/*
 * Create or grow bucket memory. Return old_buckets if no allocation was
 * needed, the newly allocated buckets if allocation was needed and
 * successful, and NULL on error.
 */
static struct cfs_hash_bucket **
cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
			 unsigned int old_size, unsigned int new_size)
{
	struct cfs_hash_bucket **new_bkts;
	int i;
	LASSERT(old_size == 0 || old_bkts);
	if (old_bkts && old_size == new_size)
		return old_bkts;
	LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
	if (!new_bkts)
		return NULL;
	if (old_bkts) {
		memcpy(new_bkts, old_bkts,
		       min(old_size, new_size) * sizeof(*old_bkts));
	}
	for (i = old_size; i < new_size; i++) {
		struct hlist_head *hhead;
		struct cfs_hash_bd bd;
		LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
		if (!new_bkts[i]) {
			cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
					      old_size, new_size);
			return NULL;
		}
		new_bkts[i]->hsb_index   = i;
		new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
		new_bkts[i]->hsb_depmax  = -1; /* unknown */
		bd.bd_bucket = new_bkts[i];
		cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
			INIT_HLIST_HEAD(hhead);
		if (cfs_hash_with_no_lock(hs) ||
		    cfs_hash_with_no_bktlock(hs))
			continue;
		if (cfs_hash_with_rw_bktlock(hs))
			rwlock_init(&new_bkts[i]->hsb_lock.rw);
		else if (cfs_hash_with_spin_bktlock(hs))
			spin_lock_init(&new_bkts[i]->hsb_lock.spin);
		else
			LBUG(); /* invalid use-case */
	}
	return new_bkts;
}
/**
 * Initialize new libcfs hash, where:
 * @name     - Descriptive hash name
 * @cur_bits - Initial hash table size, in bits
 * @max_bits - Maximum allowed hash table resize, in bits
 * @ops      - Registered hash table operations
 * @flags    - CFS_HASH_REHASH enables dynamic hash resizing
 *           - CFS_HASH_SORT enables chained hash sort
 */
static int cfs_hash_rehash_worker(struct cfs_workitem *wi);
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static int cfs_hash_dep_print(struct cfs_workitem *wi)
{
	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
	int dep;
	int bkt;
	int off;
	int bits;
	spin_lock(&hs->hs_dep_lock);
	dep  = hs->hs_dep_max;
	bkt  = hs->hs_dep_bkt;
	off  = hs->hs_dep_off;
	bits = hs->hs_dep_bits;
	spin_unlock(&hs->hs_dep_lock);
	LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
		      hs->hs_name, bits, dep, bkt, off);
	spin_lock(&hs->hs_dep_lock);
	hs->hs_dep_bits = 0; /* mark as workitem done */
	spin_unlock(&hs->hs_dep_lock);
	return 0;
}
static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
{
	spin_lock_init(&hs->hs_dep_lock);
	cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
}
static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
{
	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
		return;
	spin_lock(&hs->hs_dep_lock);
	while (hs->hs_dep_bits != 0) {
		spin_unlock(&hs->hs_dep_lock);
		cond_resched();
		spin_lock(&hs->hs_dep_lock);
	}
	spin_unlock(&hs->hs_dep_lock);
}
#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
struct cfs_hash *
cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
		unsigned bkt_bits, unsigned extra_bytes,
		unsigned min_theta, unsigned max_theta,
		struct cfs_hash_ops *ops, unsigned flags)
{
	struct cfs_hash *hs;
	int len;
	CLASSERT(CFS_HASH_THETA_BITS < 15);
	LASSERT(name);
	LASSERT(ops->hs_key);
	LASSERT(ops->hs_hash);
	LASSERT(ops->hs_object);
	LASSERT(ops->hs_keycmp);
	LASSERT(ops->hs_get);
	LASSERT(ops->hs_put_locked);
	if ((flags & CFS_HASH_REHASH) != 0)
		flags |= CFS_HASH_COUNTER; /* must have counter */
	LASSERT(cur_bits > 0);
	LASSERT(cur_bits >= bkt_bits);
	LASSERT(max_bits >= cur_bits && max_bits < 31);
	LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
	LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
		     (flags & CFS_HASH_NO_LOCK) == 0));
	LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0, ops->hs_keycpy));
	len = (flags & CFS_HASH_BIGNAME) == 0 ?
	      CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
	LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
	if (!hs)
		return NULL;
	strlcpy(hs->hs_name, name, len);
	hs->hs_flags = flags;
	atomic_set(&hs->hs_refcount, 1);
	atomic_set(&hs->hs_count, 0);
	cfs_hash_lock_setup(hs);
	cfs_hash_hlist_setup(hs);
	hs->hs_cur_bits = (__u8)cur_bits;
	hs->hs_min_bits = (__u8)cur_bits;
	hs->hs_max_bits = (__u8)max_bits;
	hs->hs_bkt_bits = (__u8)bkt_bits;
	hs->hs_ops	   = ops;
	hs->hs_extra_bytes = extra_bytes;
	hs->hs_rehash_bits = 0;
	cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
	cfs_hash_depth_wi_init(hs);
	if (cfs_hash_with_rehash(hs))
		__cfs_hash_set_theta(hs, min_theta, max_theta);
	hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
						  CFS_HASH_NBKT(hs));
	if (hs->hs_buckets)
		return hs;
	LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
	return NULL;
}
EXPORT_SYMBOL(cfs_hash_create);
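/*
 * Hypothetical usage sketch (added by the editor; the cfs_hash_ops
 * callback signatures are declared in the libcfs headers, not in this
 * file, so only the create call itself is spelled out).  A caller
 * embeds a struct hlist_node in its object, fills a cfs_hash_ops
 * table with at least hs_key/hs_hash/hs_object/hs_keycmp/hs_get/
 * hs_put_locked (the LASSERT()s above enforce this), and then:
 *
 *	hs = cfs_hash_create("example", 5, 10, 3, 0,
 *			     CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *			     &example_hash_ops, CFS_HASH_DEFAULT);
 *
 * CFS_HASH_MIN_THETA/MAX_THETA/DEFAULT are assumed here to be the
 * usual libcfs constants; cur_bits = 5 gives an initial table of 32
 * hlist heads (in 2^(5-3) = 4 buckets) that may grow to 2^10 heads.
 */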
/**
 * Cleanup libcfs hash @hs.
 */
static void
cfs_hash_destroy(struct cfs_hash *hs)
{
	struct hlist_node *hnode;
	struct hlist_node *pos;
	struct cfs_hash_bd bd;
	int i;
	LASSERT(hs);
	LASSERT(!cfs_hash_is_exiting(hs) &&
		!cfs_hash_is_iterating(hs));
	/*
	 * prohibit further rehashes; no lock is needed because
	 * I'm the only (last) one who can change it.
	 */
	hs->hs_exiting = 1;
	if (cfs_hash_with_rehash(hs))
		cfs_hash_rehash_cancel(hs);
	cfs_hash_depth_wi_cancel(hs);
	/* rehash should be done/canceled */
	LASSERT(hs->hs_buckets && !hs->hs_rehash_buckets);
	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct hlist_head *hhead;
		LASSERT(bd.bd_bucket);
		/* no need to take this lock, just for consistent code */
		cfs_hash_bd_lock(hs, &bd, 1);
		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			hlist_for_each_safe(hnode, pos, hhead) {
				LASSERTF(!cfs_hash_with_assert_empty(hs),
					 "hash %s bucket %u(%u) is not empty: %u items left\n",
					 hs->hs_name, bd.bd_bucket->hsb_index,
					 bd.bd_offset, bd.bd_bucket->hsb_count);
				/* can't assert that the key is valid,
				 * because we may have interrupted a rehash
				 */
				cfs_hash_bd_del_locked(hs, &bd, hnode);
				cfs_hash_exit(hs, hnode);
			}
		}
		LASSERT(bd.bd_bucket->hsb_count == 0);
		cfs_hash_bd_unlock(hs, &bd, 1);
		cond_resched();
	}
	LASSERT(atomic_read(&hs->hs_count) == 0);
	cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
			      0, CFS_HASH_NBKT(hs));
	i = cfs_hash_with_bigname(hs) ?
	    CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
	LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
}
struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
{
	if (atomic_inc_not_zero(&hs->hs_refcount))
		return hs;
	return NULL;
}
EXPORT_SYMBOL(cfs_hash_getref);
void cfs_hash_putref(struct cfs_hash *hs)
{
	if (atomic_dec_and_test(&hs->hs_refcount))
		cfs_hash_destroy(hs);
}
EXPORT_SYMBOL(cfs_hash_putref);
static inline int
cfs_hash_rehash_bits(struct cfs_hash *hs)
{
	if (cfs_hash_with_no_lock(hs) ||
	    !cfs_hash_with_rehash(hs))
		return -EOPNOTSUPP;
	if (unlikely(cfs_hash_is_exiting(hs)))
		return -ESRCH;
	if (unlikely(cfs_hash_is_rehashing(hs)))
		return -EALREADY;
	if (unlikely(cfs_hash_is_iterating(hs)))
		return -EAGAIN;
	/* XXX: need to handle the case with max_theta != 2.0
	 *      and the case with min_theta != 0.5
	 */
	if ((hs->hs_cur_bits < hs->hs_max_bits) &&
	    (__cfs_hash_theta(hs) > hs->hs_max_theta))
		return hs->hs_cur_bits + 1;
	if (!cfs_hash_with_shrink(hs))
		return 0;
	if ((hs->hs_cur_bits > hs->hs_min_bits) &&
	    (__cfs_hash_theta(hs) < hs->hs_min_theta))
		return hs->hs_cur_bits - 1;
	return 0;
}
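/*
 * Worked example (added; this assumes __cfs_hash_theta() reports the
 * average number of items per hlist head as a fixed-point value
 * scaled by 2^CFS_HASH_THETA_BITS): with cur_bits = 10 and a
 * max_theta equivalent to 2.0, the function asks to grow to 11 bits
 * once the table holds more than 2 * 2^10 = 2048 items; with a
 * min_theta equivalent to 0.5 and shrinking enabled, it asks to
 * shrink to 9 bits once the count drops below 0.5 * 2^10 = 512.
 */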
/**
 * don't allow inline rehash if:
 * - user wants non-blocking change (add/del) on hash table
 * - too many elements
 */
static inline int
cfs_hash_rehash_inline(struct cfs_hash *hs)
{
	return !cfs_hash_with_nblk_change(hs) &&
	       atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
}
/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called when the item is added.
 */
void
cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
	struct cfs_hash_bd bd;
	int bits;
	LASSERT(hlist_unhashed(hnode));
	cfs_hash_lock(hs, 0);
	cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
	cfs_hash_key_validate(hs, key, hnode);
	cfs_hash_bd_add_locked(hs, &bd, hnode);
	cfs_hash_bd_unlock(hs, &bd, 1);
	bits = cfs_hash_rehash_bits(hs);
	cfs_hash_unlock(hs, 0);
	if (bits > 0)
		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
}
EXPORT_SYMBOL(cfs_hash_add);
static struct hlist_node *
cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
		     struct hlist_node *hnode, int noref)
{
	struct hlist_node *ehnode;
	struct cfs_hash_bd bds[2];
	int bits = 0;
	LASSERT(hlist_unhashed(hnode));
	cfs_hash_lock(hs, 0);
	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
	cfs_hash_key_validate(hs, key, hnode);
	ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
						 hnode, noref);
	cfs_hash_dual_bd_unlock(hs, bds, 1);
	if (ehnode == hnode)	/* new item added */
		bits = cfs_hash_rehash_bits(hs);
	cfs_hash_unlock(hs, 0);
	if (bits > 0)
		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
	return ehnode;
}
/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called if the item was added.
 * Returns 0 on success or -EALREADY on key collisions.
 */
int
cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
		    struct hlist_node *hnode)
{
	return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
	       -EALREADY : 0;
}
EXPORT_SYMBOL(cfs_hash_add_unique);
/**
 * Add item @hnode to libcfs hash @hs using @key.  If this @key
 * already exists in the hash then ops->hs_get will be called on the
 * conflicting entry and that entry will be returned to the caller.
 * Otherwise ops->hs_get is called on the item which was added.
 */
void *
cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
			struct hlist_node *hnode)
{
	hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
	return cfs_hash_object(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_findadd_unique);
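/*
 * Illustration (added by the editor) of the three add flavours,
 * assuming a hypothetical @obj that embeds a struct hlist_node at
 * obj->node:
 *
 *	cfs_hash_add(hs, key, &obj->node);		always inserts
 *	rc = cfs_hash_add_unique(hs, key, &obj->node);
 *		rc == -EALREADY if another item already has @key
 *	old = cfs_hash_findadd_unique(hs, key, &obj->node);
 *		old == obj if inserted, otherwise the pre-existing
 *		object (returned with a reference via ops->hs_get)
 */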
/**
 * Delete item @hnode from the libcfs hash @hs using @key.  The @key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket.  The object
 * removed from the hash will be returned and ops->hs_put is called
 * on the removed object.
 */
void *
cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
	void *obj = NULL;
	int bits = 0;
	struct cfs_hash_bd bds[2];
	cfs_hash_lock(hs, 0);
	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
	/* NB: do nothing if @hnode is not in hash table */
	if (!hnode || !hlist_unhashed(hnode)) {
		if (!bds[1].bd_bucket && hnode) {
			cfs_hash_bd_del_locked(hs, &bds[0], hnode);
		} else {
			hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
								key, hnode);
		}
	}
	if (hnode) {
		obj  = cfs_hash_object(hs, hnode);
		bits = cfs_hash_rehash_bits(hs);
	}
	cfs_hash_dual_bd_unlock(hs, bds, 1);
	cfs_hash_unlock(hs, 0);
	if (bits > 0)
		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
	return obj;
}
EXPORT_SYMBOL(cfs_hash_del);
/**
 * Delete item given @key in libcfs hash @hs.  The first @key found in
 * the hash will be removed, if the key exists multiple times in the hash
 * @hs this function must be called once per key.  The removed object
 * will be returned and ops->hs_put is called on the removed object.
 */
void *
cfs_hash_del_key(struct cfs_hash *hs, const void *key)
{
	return cfs_hash_del(hs, key, NULL);
}
EXPORT_SYMBOL(cfs_hash_del_key);
/**
 * Lookup an item using @key in the libcfs hash @hs and return it.
 * If the @key is found in the hash, ops->hs_get() is called and the
 * matching object is returned.  It is the caller's responsibility
 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
 * when finished with the object.  If the @key was not found
 * in the hash @hs, NULL is returned.
 */
void *
cfs_hash_lookup(struct cfs_hash *hs, const void *key)
{
	void *obj = NULL;
	struct hlist_node *hnode;
	struct cfs_hash_bd bds[2];
	cfs_hash_lock(hs, 0);
	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
	hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
	if (hnode)
		obj = cfs_hash_object(hs, hnode);
	cfs_hash_dual_bd_unlock(hs, bds, 0);
	cfs_hash_unlock(hs, 0);
	return obj;
}
EXPORT_SYMBOL(cfs_hash_lookup);
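/*
 * Usage sketch (added by the editor): a successful lookup takes a
 * reference through ops->hs_get, so the caller must drop it with the
 * cfs_hash_put() macro mentioned above.  @obj and obj->node are
 * hypothetical names:
 *
 *	obj = cfs_hash_lookup(hs, key);
 *	if (obj) {
 *		... use obj ...
 *		cfs_hash_put(hs, &obj->node);
 *	}
 */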
static void
cfs_hash_for_each_enter(struct cfs_hash *hs)
{
	LASSERT(!cfs_hash_is_exiting(hs));
	if (!cfs_hash_with_rehash(hs))
		return;
	/*
	 * NB: there is a race on cfs_hash::hs_iterating, but it doesn't
	 * matter because it's just an unreliable signal to the rehash
	 * thread, which will try to finish the rehash ASAP upon seeing it.
	 */
	hs->hs_iterating = 1;
	cfs_hash_lock(hs, 1);
	hs->hs_iterators++;
	/* NB: iteration is mostly called by service threads; we tend
	 * to cancel a pending rehash request instead of blocking the
	 * service thread, and relaunch the rehash request after the
	 * iteration
	 */
	if (cfs_hash_is_rehashing(hs))
		cfs_hash_rehash_cancel_locked(hs);
	cfs_hash_unlock(hs, 1);
}
static void
cfs_hash_for_each_exit(struct cfs_hash *hs)
{
	int remained;
	int bits;
	if (!cfs_hash_with_rehash(hs))
		return;
	cfs_hash_lock(hs, 1);
	remained = --hs->hs_iterators;
	bits = cfs_hash_rehash_bits(hs);
	cfs_hash_unlock(hs, 1);
	/* NB: there is a race on cfs_hash::hs_iterating, see above */
	if (remained == 0)
		hs->hs_iterating = 0;
	if (bits > 0) {
		cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
				    CFS_HASH_LOOP_HOG);
	}
}
/**
 * For each item in the libcfs hash @hs call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 *
 * a) the function may sleep!
 * b) during the callback:
 *    . the bucket lock is held so the callback must never sleep.
 *    . if @remove_safe is true, the user can remove the current item
 *      by calling cfs_hash_bd_del_locked
 */
static __u64
cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
			void *data, int remove_safe)
{
	struct hlist_node *hnode;
	struct hlist_node *pos;
	struct cfs_hash_bd bd;
	__u64 count = 0;
	int excl = !!remove_safe;
	int loop = 0;
	int i;
	cfs_hash_for_each_enter(hs);
	cfs_hash_lock(hs, 0);
	LASSERT(!cfs_hash_is_rehashing(hs));
	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct hlist_head *hhead;
		cfs_hash_bd_lock(hs, &bd, excl);
		if (!func) { /* only glimpse size */
			count += bd.bd_bucket->hsb_count;
			cfs_hash_bd_unlock(hs, &bd, excl);
			continue;
		}
		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			hlist_for_each_safe(hnode, pos, hhead) {
				cfs_hash_bucket_validate(hs, &bd, hnode);
				count++;
				loop++;
				if (func(hs, &bd, hnode, data)) {
					cfs_hash_bd_unlock(hs, &bd, excl);
					goto out;
				}
			}
		}
		cfs_hash_bd_unlock(hs, &bd, excl);
		if (loop < CFS_HASH_LOOP_HOG)
			continue;
		loop = 0;
		cfs_hash_unlock(hs, 0);
		cond_resched();
		cfs_hash_lock(hs, 0);
	}
 out:
	cfs_hash_unlock(hs, 0);
	cfs_hash_for_each_exit(hs);
	return count;
}
struct cfs_hash_cond_arg {
	cfs_hash_cond_opt_cb_t	func;
	void			*arg;
};
static int
cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			 struct hlist_node *hnode, void *data)
{
	struct cfs_hash_cond_arg *cond = data;
	if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
		cfs_hash_bd_del_locked(hs, bd, hnode);
	return 0;
}
/**
 * Delete each item from the libcfs hash @hs for which @func returns true.
 * The write lock is held while walking each bucket so that no object
 * can be referenced concurrently during the removal.
 */
void
cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
{
	struct cfs_hash_cond_arg arg = {
		.func	= func,
		.arg	= data,
	};
	cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
}
EXPORT_SYMBOL(cfs_hash_cond_del);
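/*
 * Illustrative usage sketch (not part of the original file): purging
 * stale entries with cfs_hash_cond_del().  "struct my_obj", its fields
 * and my_obj_purge() are hypothetical; only the cfs_hash calls are real.
 * The predicate runs under the bucket write lock, so it must not sleep.
 */
struct my_obj {
	struct hlist_node	mo_node;	/* linkage used by the hash */
	__u64			mo_expiry;	/* hypothetical expiry stamp */
};

/* cfs_hash_cond_opt_cb_t: return true to have the item deleted */
static int my_obj_is_expired(void *obj, void *data)
{
	struct my_obj *mo = obj;
	__u64 *now = data;

	return mo->mo_expiry != 0 && mo->mo_expiry <= *now;
}

static void my_obj_purge(struct cfs_hash *hs, __u64 now)
{
	/* one tight pass; expired items are unlinked under the bucket lock */
	cfs_hash_cond_del(hs, my_obj_is_expired, &now);
}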
void
cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
		  void *data)
{
	cfs_hash_for_each_tight(hs, func, data, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each);
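/*
 * Illustrative sketch (not part of the original file): counting matches
 * with cfs_hash_for_each(), reusing the hypothetical struct my_obj from
 * the cfs_hash_cond_del() example above.  The callback runs with the
 * bucket lock held, so it must not sleep; a non-zero return value stops
 * the walk early.
 */
static int my_obj_count_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			   struct hlist_node *hnode, void *data)
{
	struct my_obj *mo = cfs_hash_object(hs, hnode);
	__u64 *nr = data;

	if (mo->mo_expiry != 0)
		(*nr)++;
	return 0;	/* 0 == keep going */
}

static __u64 my_obj_nr_expiring(struct cfs_hash *hs)
{
	__u64 nr = 0;

	cfs_hash_for_each(hs, my_obj_count_cb, &nr);
	return nr;
}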
void
cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
		       void *data)
{
	cfs_hash_for_each_tight(hs, func, data, 1);
}
EXPORT_SYMBOL(cfs_hash_for_each_safe);
static int
cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
	      struct hlist_node *hnode, void *data)
{
	*(int *)data = 0;
	return 1; /* return 1 to break the loop */
}
int
cfs_hash_is_empty(struct cfs_hash *hs)
{
	int empty = 1;
	cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
	return empty;
}
EXPORT_SYMBOL(cfs_hash_is_empty);
__u64
cfs_hash_size_get(struct cfs_hash *hs)
{
	return cfs_hash_with_counter(hs) ?
	       atomic_read(&hs->hs_count) :
	       cfs_hash_for_each_tight(hs, NULL, NULL, 0);
}
EXPORT_SYMBOL(cfs_hash_size_get);
/*
 * cfs_hash_for_each_relax:
 * Iterate the hash table and call @func on each item without
 * holding any lock.  This function can't guarantee to finish the
 * iteration if these features are enabled:
 *
 *  a. if rehash_key is enabled, an item can be moved from
 *     one bucket to another bucket
 *  b. the user can remove a non-zero-ref item from the hash-table,
 *     so the item can be removed from the hash-table; even worse,
 *     it's possible that the user changed the key and inserted the
 *     item into another hash bucket.
 * There's no way for us to finish the iteration correctly in the
 * previous two cases, so the iteration has to stop on change.
 */
static int
cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
			void *data)
{
	struct hlist_node *hnode;
	struct hlist_node *tmp;
	struct cfs_hash_bd bd;
	__u32 version;
	int count = 0;
	int stop_on_change;
	int rc = 0;	/* NB: checked after empty hlists, so must start at 0 */
	int i;
	stop_on_change = cfs_hash_with_rehash_key(hs) ||
			 !cfs_hash_with_no_itemref(hs) ||
			 !hs->hs_ops->hs_put_locked;
	cfs_hash_lock(hs, 0);
	LASSERT(!cfs_hash_is_rehashing(hs));
	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct hlist_head *hhead;
		cfs_hash_bd_lock(hs, &bd, 0);
		version = cfs_hash_bd_version_get(&bd);
		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			for (hnode = hhead->first; hnode;) {
				cfs_hash_bucket_validate(hs, &bd, hnode);
				cfs_hash_get(hs, hnode);
				cfs_hash_bd_unlock(hs, &bd, 0);
				cfs_hash_unlock(hs, 0);
				rc = func(hs, &bd, hnode, data);
				if (stop_on_change)
					cfs_hash_put(hs, hnode);
				cond_resched();
				count++;
				cfs_hash_lock(hs, 0);
				cfs_hash_bd_lock(hs, &bd, 0);
				if (!stop_on_change) {
					tmp = hnode->next;
					cfs_hash_put_locked(hs, hnode);
					hnode = tmp;
				} else { /* bucket changed? */
					if (version !=
					    cfs_hash_bd_version_get(&bd))
						break;
					/* safe to continue because no change */
					hnode = hnode->next;
				}
				if (rc) /* callback wants to break iteration */
					break;
			}
			if (rc) /* callback wants to break iteration */
				break;
		}
		cfs_hash_bd_unlock(hs, &bd, 0);
		if (rc) /* callback wants to break iteration */
			break;
	}
	cfs_hash_unlock(hs, 0);
	return count;
}
int
cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
			 void *data)
{
	if (cfs_hash_with_no_lock(hs) ||
	    cfs_hash_with_rehash_key(hs) ||
	    !cfs_hash_with_no_itemref(hs))
		return -EOPNOTSUPP;
	if (!hs->hs_ops->hs_get ||
	    (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked))
		return -EOPNOTSUPP;
	cfs_hash_for_each_enter(hs);
	cfs_hash_for_each_relax(hs, func, data);
	cfs_hash_for_each_exit(hs);
	return 0;
}
EXPORT_SYMBOL(cfs_hash_for_each_nolock);
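/*
 * Illustrative sketch (not part of the original file): a sleeping
 * callback via cfs_hash_for_each_nolock().  All locks are dropped
 * around @func, but this only works for tables created with
 * CFS_HASH_NO_ITEMREF, without CFS_HASH_NO_LOCK or CFS_HASH_REHASH_KEY,
 * and with hs_get plus hs_put/hs_put_locked registered, hence the
 * -EOPNOTSUPP check.  my_obj_flush_one() is hypothetical.
 */
static int my_obj_flush_one(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			    struct hlist_node *hnode, void *data)
{
	/* safe to sleep here, e.g. waiting for I/O on the object */
	return 0;
}

static int my_obj_flush_all(struct cfs_hash *hs)
{
	int rc = cfs_hash_for_each_nolock(hs, my_obj_flush_one, NULL);

	if (rc == -EOPNOTSUPP)
		CDEBUG(D_INFO, "%s: not set up for lockless iteration\n",
		       hs->hs_name);
	return rc;
}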
/**
 * For each hash bucket in the libcfs hash @hs call the passed callback
 * @func until all the hash buckets are empty.  The passed callback @func
 * or the previously registered callback hs->hs_put must remove the item
 * from the hash.  You may either use the cfs_hash_del() or hlist_del()
 * functions.  No rwlocks will be held during the callback @func, so it
 * is safe to sleep if needed.  This function will not terminate until
 * the hash is empty.  Note it is still possible to concurrently add new
 * items into the hash.  It is the caller's responsibility to ensure
 * the required locking is in place to prevent concurrent insertions.
 */
int
cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
			void *data)
{
	unsigned int i = 0;
	if (cfs_hash_with_no_lock(hs))
		return -EOPNOTSUPP;
	if (!hs->hs_ops->hs_get ||
	    (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked))
		return -EOPNOTSUPP;
	cfs_hash_for_each_enter(hs);
	while (cfs_hash_for_each_relax(hs, func, data)) {
		CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
		       hs->hs_name, i++);
	}
	cfs_hash_for_each_exit(hs);
	return 0;
}
EXPORT_SYMBOL(cfs_hash_for_each_empty);
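/*
 * Illustrative sketch (not part of the original file): draining a table
 * at shutdown with cfs_hash_for_each_empty().  Each callback invocation
 * must actually remove the item (here via cfs_hash_del()), otherwise the
 * loop above never terminates.  The key field "mo_key" and
 * my_obj_free() are hypothetical; cfs_hash_del() and cfs_hash_putref()
 * are real helpers defined earlier in this file.
 */
static int my_obj_evict_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			   struct hlist_node *hnode, void *data)
{
	struct my_obj *mo = cfs_hash_object(hs, hnode);

	/* no bucket lock is held here, so sleeping is allowed */
	cfs_hash_del(hs, &mo->mo_key, hnode);
	my_obj_free(mo);	/* hypothetical release helper */
	return 0;
}

static void my_obj_table_fini(struct cfs_hash *hs)
{
	cfs_hash_for_each_empty(hs, my_obj_evict_cb, NULL);
	cfs_hash_putref(hs);	/* drop the creator's reference */
}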
void
cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
			cfs_hash_for_each_cb_t func, void *data)
{
	struct hlist_head *hhead;
	struct hlist_node *hnode;
	struct cfs_hash_bd bd;
	cfs_hash_for_each_enter(hs);
	cfs_hash_lock(hs, 0);
	if (hindex >= CFS_HASH_NHLIST(hs))
		goto out;
	cfs_hash_bd_index_set(hs, hindex, &bd);
	cfs_hash_bd_lock(hs, &bd, 0);
	hhead = cfs_hash_bd_hhead(hs, &bd);
	hlist_for_each(hnode, hhead) {
		if (func(hs, &bd, hnode, data))
			break;
	}
	cfs_hash_bd_unlock(hs, &bd, 0);
out:
	cfs_hash_unlock(hs, 0);
	cfs_hash_for_each_exit(hs);
}
EXPORT_SYMBOL(cfs_hash_hlist_for_each);
/*
 * For each item in the libcfs hash @hs which matches the @key call
 * the passed callback @func and pass to it as an argument each hash
 * item and the private @data. During the callback the bucket lock
 * is held so the callback must never sleep.
 */
void
cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
		      cfs_hash_for_each_cb_t func, void *data)
{
	struct hlist_node *hnode;
	struct cfs_hash_bd bds[2];
	unsigned int i;
	cfs_hash_lock(hs, 0);
	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
	cfs_hash_for_each_bd(bds, 2, i) {
		struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
		hlist_for_each(hnode, hlist) {
			cfs_hash_bucket_validate(hs, &bds[i], hnode);
			if (cfs_hash_keycmp(hs, key, hnode)) {
				if (func(hs, &bds[i], hnode, data))
					break;
			}
		}
	}
	cfs_hash_dual_bd_unlock(hs, bds, 0);
	cfs_hash_unlock(hs, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each_key);
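/*
 * Illustrative sketch (not part of the original file): scanning all
 * items that hash to @key, which is useful when duplicate keys are
 * allowed.  The bucket lock is held across the callback, so no
 * sleeping; returning non-zero ends the scan.  struct my_obj is the
 * hypothetical type from the earlier sketches.
 */
static int my_obj_match_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			   struct hlist_node *hnode, void *data)
{
	struct my_obj **res = data;
	struct my_obj *mo = cfs_hash_object(hs, hnode);

	if (mo->mo_expiry == 0) {	/* first live duplicate wins */
		*res = mo;
		return 1;
	}
	return 0;
}

static struct my_obj *my_obj_lookup_live(struct cfs_hash *hs, const void *key)
{
	struct my_obj *mo = NULL;

	cfs_hash_for_each_key(hs, key, my_obj_match_cb, &mo);
	return mo;	/* NB: no extra reference is taken in this sketch */
}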
/**
 * Rehash the libcfs hash @hs to the given @bits.  This can be used
 * to grow the hash size when excessive chaining is detected, or to
 * shrink the hash when it is larger than needed.  When the CFS_HASH_REHASH
 * flag is set in @hs the libcfs hash may be dynamically rehashed
 * during addition or removal if the hash's theta value exceeds
 * either the hs->hs_min_theta or hs->hs_max_theta values.  By default
 * these values are tuned to keep the chained hash depth small, and
 * this approach assumes a reasonably uniform hashing function.  The
 * theta thresholds for @hs are tunable via cfs_hash_set_theta().
 */
void
cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
{
	int i;
	/* need hold cfs_hash_lock(hs, 1) */
	LASSERT(cfs_hash_with_rehash(hs) &&
		!cfs_hash_with_no_lock(hs));
	if (!cfs_hash_is_rehashing(hs))
		return;
	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
		hs->hs_rehash_bits = 0;
		return;
	}
	for (i = 2; cfs_hash_is_rehashing(hs); i++) {
		cfs_hash_unlock(hs, 1);
		/* raise a console warning if we have been waiting too long */
		CDEBUG(is_power_of_2(i >> 3) ? D_WARNING : D_INFO,
		       "hash %s is still rehashing, rescheded %d\n",
		       hs->hs_name, i - 1);
		cond_resched();
		cfs_hash_lock(hs, 1);
	}
}
void
cfs_hash_rehash_cancel(struct cfs_hash *hs)
{
	cfs_hash_lock(hs, 1);
	cfs_hash_rehash_cancel_locked(hs);
	cfs_hash_unlock(hs, 1);
}
int
cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
{
	int rc;
	LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
	cfs_hash_lock(hs, 1);
	rc = cfs_hash_rehash_bits(hs);
	if (rc <= 0) {
		cfs_hash_unlock(hs, 1);
		return rc;
	}
	hs->hs_rehash_bits = rc;
	if (!do_rehash) {
		/* launch and return */
		cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
		cfs_hash_unlock(hs, 1);
		return 0;
	}
	/* rehash right now */
	cfs_hash_unlock(hs, 1);
	return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
}
static int
cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
{
	struct cfs_hash_bd new;
	struct hlist_head *hhead;
	struct hlist_node *hnode;
	struct hlist_node *pos;
	void *key;
	int c = 0;
	/* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
	cfs_hash_bd_for_each_hlist(hs, old, hhead) {
		hlist_for_each_safe(hnode, pos, hhead) {
			key = cfs_hash_key(hs, hnode);
			LASSERT(key);
			/* Validate hnode is in the correct bucket. */
			cfs_hash_bucket_validate(hs, old, hnode);
			/*
                         * Delete from old hash bucket; move to new bucket.
                         * ops->hs_key must be defined.
                         */
			cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
					     hs->hs_rehash_bits, key, &new);
			cfs_hash_bd_move_locked(hs, old, &new, hnode);
			c++;
		}
	}
	return c;
}
static int
cfs_hash_rehash_worker(struct cfs_workitem *wi)
{
	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
	struct cfs_hash_bucket **bkts;
	struct cfs_hash_bd bd;
	unsigned int old_size;
	unsigned int new_size;
	int bsize;
	int count = 0;
	int rc = 0;
	int i;
	LASSERT(hs && cfs_hash_with_rehash(hs));
	cfs_hash_lock(hs, 0);
	LASSERT(cfs_hash_is_rehashing(hs));
	old_size = CFS_HASH_NBKT(hs);
	new_size = CFS_HASH_RH_NBKT(hs);
	cfs_hash_unlock(hs, 0);
	/*
         * don't need hs::hs_rwlock for hs::hs_buckets,
         * because nobody can change bkt-table except me.
         */
	bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
					old_size, new_size);
	cfs_hash_lock(hs, 1);
	if (!bkts) {
		rc = -ENOMEM;
		goto out;
	}
	if (bkts == hs->hs_buckets) {
		bkts = NULL; /* do nothing */
		goto out;
	}
	rc = __cfs_hash_theta(hs);
	if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
		/* free the new allocated bkt-table */
		old_size = new_size;
		new_size = CFS_HASH_NBKT(hs);
		rc = -EALREADY;
		goto out;
	}
	LASSERT(!hs->hs_rehash_buckets);
	hs->hs_rehash_buckets = bkts;
	rc = 0;
	cfs_hash_for_each_bucket(hs, &bd, i) {
		if (cfs_hash_is_exiting(hs)) {
			rc = -ESRCH;
			/* someone wants to destroy the hash, abort now */
			if (old_size < new_size) /* OK to free old bkt-table */
				break;
			/* it's shrinking, need free new bkt-table */
			hs->hs_rehash_buckets = NULL;
			old_size = new_size;
			new_size = CFS_HASH_NBKT(hs);
			goto out;
		}
		count += cfs_hash_rehash_bd(hs, &bd);
		if (count < CFS_HASH_LOOP_HOG ||
		    cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
			continue;
		}
		count = 0;
		cfs_hash_unlock(hs, 1);
		cond_resched();
		cfs_hash_lock(hs, 1);
	}
	hs->hs_rehash_count++;
	bkts = hs->hs_buckets;
	hs->hs_buckets = hs->hs_rehash_buckets;
	hs->hs_rehash_buckets = NULL;
	hs->hs_cur_bits = hs->hs_rehash_bits;
out:
	hs->hs_rehash_bits = 0;
	if (rc == -ESRCH) /* never be scheduled again */
		cfs_wi_exit(cfs_sched_rehash, wi);
	bsize = cfs_hash_bkt_size(hs);
	cfs_hash_unlock(hs, 1);
	/* can't refer to @hs anymore because it could be destroyed */
	if (bkts)
		cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
	if (rc != 0)
		CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
	/* return 1 only if cfs_wi_exit is called */
	return rc == -ESRCH;
}
/**
 * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
 * @old_key must be provided to locate the object's previous location
 * in the hash, and the @new_key will be used to reinsert the object.
 * Use this function instead of a cfs_hash_add() + cfs_hash_del()
 * combo when it is critical that there is no window in time where the
 * object is missing from the hash.  When an object is being rehashed
 * the registered cfs_hash_get() and cfs_hash_put() functions will
 * not be called.
 */
void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
			 void *new_key, struct hlist_node *hnode)
{
	struct cfs_hash_bd bds[3];
	struct cfs_hash_bd old_bds[2];
	struct cfs_hash_bd new_bd;
	LASSERT(!hlist_unhashed(hnode));
	cfs_hash_lock(hs, 0);
	cfs_hash_dual_bd_get(hs, old_key, old_bds);
	cfs_hash_bd_get(hs, new_key, &new_bd);
	bds[0] = old_bds[0];
	bds[1] = old_bds[1];
	bds[2] = new_bd;
	/* NB: bds[0] and bds[1] are ordered already */
	cfs_hash_bd_order(&bds[1], &bds[2]);
	cfs_hash_bd_order(&bds[0], &bds[1]);
	cfs_hash_multi_bd_lock(hs, bds, 3, 1);
	if (likely(!old_bds[1].bd_bucket)) {
		cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
	} else {
		cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
		cfs_hash_bd_add_locked(hs, &new_bd, hnode);
	}
	/* overwrite the key inside the locks, otherwise it may race with
	 * other operations, e.g. rehash
	 */
	cfs_hash_keycpy(hs, hnode, new_key);
	cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
	cfs_hash_unlock(hs, 0);
}
EXPORT_SYMBOL(cfs_hash_rehash_key);
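/*
 * Illustrative sketch (not part of the original file): changing an
 * object's key without a window where concurrent lookups miss it.  A
 * bare cfs_hash_del() + cfs_hash_add() pair would leave such a window;
 * cfs_hash_rehash_key() moves the node while holding all affected
 * bucket locks and copies the new key in via the registered key-copy
 * hook.  "mo_key" and "mo_node" are the hypothetical fields used in
 * the earlier sketches.
 */
static void my_obj_rename(struct cfs_hash *hs, struct my_obj *mo,
			  __u64 new_key)
{
	cfs_hash_rehash_key(hs, &mo->mo_key, &new_key, &mo->mo_node);
}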
void cfs_hash_debug_header(struct seq_file *m)
{
	seq_printf(m, "%-*s   cur   min   max theta t-min t-max flags rehash   count  maxdep maxdepb distribution\n",
		   CFS_HASH_BIGNAME_LEN, "name");
}
EXPORT_SYMBOL(cfs_hash_debug_header);
static struct cfs_hash_bucket **
cfs_hash_full_bkts(struct cfs_hash *hs)
{
	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
	if (!hs->hs_rehash_buckets)
		return hs->hs_buckets;
	LASSERT(hs->hs_rehash_bits != 0);
	return hs->hs_rehash_bits > hs->hs_cur_bits ?
	       hs->hs_rehash_buckets : hs->hs_buckets;
}
static unsigned int
cfs_hash_full_nbkt(struct cfs_hash *hs)
{
	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
	if (!hs->hs_rehash_buckets)
		return CFS_HASH_NBKT(hs);
	LASSERT(hs->hs_rehash_bits != 0);
	return hs->hs_rehash_bits > hs->hs_cur_bits ?
	       CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
}
void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
{
	int dist[8] = { 0, };
	int maxdep = -1;
	int maxdepb = -1;
	int total = 0;
	int theta;
	int i;
	cfs_hash_lock(hs, 0);
	theta = __cfs_hash_theta(hs);
	seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d  0x%02x %6d ",
		   CFS_HASH_BIGNAME_LEN, hs->hs_name,
		   1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
		   1 << hs->hs_max_bits,
		   __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
		   __cfs_hash_theta_int(hs->hs_min_theta),
		   __cfs_hash_theta_frac(hs->hs_min_theta),
		   __cfs_hash_theta_int(hs->hs_max_theta),
		   __cfs_hash_theta_frac(hs->hs_max_theta),
		   hs->hs_flags, hs->hs_rehash_count);
	/*
	 * The distribution is a summary of the chained hash depth in
	 * each of the libcfs hash buckets.  Each bucket's hsb_count is
	 * divided by the hash theta value and used to generate a
	 * histogram of the hash distribution.  A uniform hash will
	 * result in all hash buckets being close to the average, thus
	 * only the first few entries in the histogram will be non-zero.
	 * If your hash function results in a non-uniform hash, this will
	 * be observable as outlier buckets in the distribution histogram.
	 *
	 * Uniform hash distribution:		128/128/0/0/0/0/0/0
	 * Non-uniform hash distribution:	128/125/0/0/0/0/2/1
	 */
	for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
		struct cfs_hash_bd bd;
		bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
		cfs_hash_bd_lock(hs, &bd, 0);
		if (maxdep < bd.bd_bucket->hsb_depmax) {
			maxdep  = bd.bd_bucket->hsb_depmax;
			maxdepb = ffz(~maxdep);
		}
		total += bd.bd_bucket->hsb_count;
		dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
		cfs_hash_bd_unlock(hs, &bd, 0);
	}
	seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
	for (i = 0; i < 8; i++)
		seq_printf(m, "%d%c",  dist[i], (i == 7) ? '\n' : '/');
	cfs_hash_unlock(hs, 0);
}
EXPORT_SYMBOL(cfs_hash_debug_str);
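/*
 * Illustrative sketch (not part of the original file): how the
 * distribution histogram in cfs_hash_debug_str() bins a bucket.  fls()
 * returns the index of the highest set bit, so the per-bucket ratio
 * hsb_count / theta lands in power-of-two bins: 0 -> dist[0],
 * 1 -> dist[1], 2..3 -> dist[2], 4..7 -> dist[3], and anything of 64
 * or more is capped into dist[7].  A well-behaved hash keeps nearly
 * every bucket in the first one or two bins.
 */
static int my_hash_dist_bin(int hsb_count, int theta)
{
	return min(fls(hsb_count / max(theta, 1)), 7);
}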