Release 4.11 fs/btrfs/ulist.c
/*
* Copyright (C) 2011 STRATO AG
* written by Arne Jansen <sensille@gmx.net>
* Distributed under the GNU GPL license version 2.
*/
#include <linux/slab.h>
#include "ulist.h"
#include "ctree.h"
/*
* ulist is a generic data structure to hold a collection of unique u64
* values. The only operations it supports is adding to the list and
* enumerating it.
* It is possible to store an auxiliary value along with the key.
*
* A sample usage for ulists is the enumeration of directed graphs without
* visiting a node twice. The pseudo-code could look like this:
*
* ulist = ulist_alloc();
* ulist_add(ulist, root);
* ULIST_ITER_INIT(&uiter);
*
* while ((elem = ulist_next(ulist, &uiter))) {
* for (all child nodes n in elem)
* ulist_add(ulist, n);
* do something useful with the node;
* }
* ulist_free(ulist);
*
* This assumes the graph nodes are addressable by u64. This stems from the
* usage for tree enumeration in btrfs, where the logical addresses are
* 64 bit.
*
* It is also useful for tree enumeration which could be done elegantly
* recursively, but is not possible due to kernel stack limitations. The
* loop would be similar to the above.
*/
/**
* ulist_init - freshly initialize a ulist
* @ulist: the ulist to initialize
*
* Note: don't use this function to init an already used ulist, use
* ulist_reinit instead.
*/
void ulist_init(struct ulist *ulist)
{
	/* Start with an empty rbtree, an empty node list and a zero count. */
	ulist->nnodes = 0;
	ulist->root = RB_ROOT;
	INIT_LIST_HEAD(&ulist->nodes);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arne Jansen | 18 | 60.00% | 1 | 33.33% |
Shilong Wang | 12 | 40.00% | 2 | 66.67% |
Total | 30 | 100.00% | 3 | 100.00% |
/**
* ulist_release - free up additionally allocated memory for the ulist
* @ulist: the ulist from which to free the additional memory
*
* This is useful in cases where the base 'struct ulist' has been statically
* allocated.
*/
void ulist_release(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_node *next;

	/* _safe variant: we free the entry we are standing on. */
	list_for_each_entry_safe(node, next, &ulist->nodes, list) {
		kfree(node);
	}
	/*
	 * Reset all bookkeeping so the ulist is immediately reusable.
	 * nnodes must be cleared here too: leaving it stale would make the
	 * counter disagree with the (now empty) list, which matters for
	 * the statically allocated case this function is documented for.
	 */
	ulist->root = RB_ROOT;
	ulist->nnodes = 0;
	INIT_LIST_HEAD(&ulist->nodes);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Shilong Wang | 33 | 64.71% | 2 | 50.00% |
Arne Jansen | 17 | 33.33% | 1 | 25.00% |
David Sterba | 1 | 1.96% | 1 | 25.00% |
Total | 51 | 100.00% | 4 | 100.00% |
/**
* ulist_reinit - prepare a ulist for reuse
* @ulist: ulist to be reused
*
* Free up all additional memory allocated for the list elements and reinit
* the ulist.
*/
void ulist_reinit(struct ulist *ulist)
{
	/* Drop every allocated element, then restore a pristine state. */
	ulist_release(ulist);
	ulist_init(ulist);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arne Jansen | 19 | 95.00% | 1 | 50.00% |
David Sterba | 1 | 5.00% | 1 | 50.00% |
Total | 20 | 100.00% | 2 | 100.00% |
/**
* ulist_alloc - dynamically allocate a ulist
* @gfp_mask: allocation flags to for base allocation
*
* The allocated ulist will be returned in an initialized state.
*/
struct ulist *ulist_alloc(gfp_t gfp_mask)
{
	struct ulist *ulist;

	ulist = kmalloc(sizeof(*ulist), gfp_mask);
	/* Hand back NULL on allocation failure, an initialized list otherwise. */
	if (ulist)
		ulist_init(ulist);

	return ulist;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arne Jansen | 41 | 97.62% | 1 | 50.00% |
Daniel J Blueman | 1 | 2.38% | 1 | 50.00% |
Total | 42 | 100.00% | 2 | 100.00% |
/**
* ulist_free - free dynamically allocated ulist
* @ulist: ulist to free
*
* It is not necessary to call ulist_release before.
*/
void ulist_free(struct ulist *ulist)
{
	/* Tolerate NULL so callers may free unconditionally. */
	if (!ulist)
		return;

	/* Release the elements before the base structure itself. */
	ulist_release(ulist);
	kfree(ulist);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arne Jansen | 25 | 96.15% | 1 | 50.00% |
David Sterba | 1 | 3.85% | 1 | 50.00% |
Total | 26 | 100.00% | 2 | 100.00% |
/* Look up @val in the rbtree; return its node or NULL if not present. */
static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
{
	struct rb_node *cur = ulist->root.rb_node;

	/* Plain binary search keyed on ulist_node::val. */
	while (cur) {
		struct ulist_node *entry;

		entry = rb_entry(cur, struct ulist_node, rb_node);
		if (val > entry->val)
			cur = cur->rb_right;
		else if (val < entry->val)
			cur = cur->rb_left;
		else
			return entry;
	}
	return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Shilong Wang | 88 | 100.00% | 1 | 100.00% |
Total | 88 | 100.00% | 1 | 100.00% |
/* Unlink @node from both the rbtree and the list, free it, drop the count. */
static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node)
{
	/*
	 * Catch counter underflow before tearing anything down; crashing
	 * here (rather than after the erase+free, as before) leaves the
	 * tree and list intact for the resulting crash dump.
	 */
	BUG_ON(ulist->nnodes == 0);
	rb_erase(&node->rb_node, &ulist->root);
	list_del(&node->list);
	kfree(node);
	ulist->nnodes--;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Qu Wenruo | 56 | 100.00% | 1 | 100.00% |
Total | 56 | 100.00% | 1 | 100.00% |
/* Insert @ins into the rbtree; return 0 on success, -EEXIST on duplicate key. */
static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins)
{
	struct rb_node **link = &ulist->root.rb_node;
	struct rb_node *parent = NULL;

	/* Walk down to the insertion point, refusing duplicate keys. */
	while (*link) {
		struct ulist_node *entry;

		parent = *link;
		entry = rb_entry(parent, struct ulist_node, rb_node);
		if (ins->val > entry->val)
			link = &parent->rb_right;
		else if (ins->val < entry->val)
			link = &parent->rb_left;
		else
			return -EEXIST;
	}
	rb_link_node(&ins->rb_node, parent, link);
	rb_insert_color(&ins->rb_node, &ulist->root);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Shilong Wang | 141 | 100.00% | 1 | 100.00% |
Total | 141 | 100.00% | 1 | 100.00% |
/**
* ulist_add - add an element to the ulist
* @ulist: ulist to add the element to
* @val: value to add to ulist
* @aux: auxiliary value to store along with val
* @gfp_mask: flags to use for allocation
*
* Note: locking must be provided by the caller. In case of rwlocks write
* locking is needed
*
* Add an element to a ulist. The @val will only be added if it doesn't
* already exist. If it is added, the auxiliary value @aux is stored along with
* it. In case @val already exists in the ulist, @aux is ignored, even if
* it differs from the already stored value.
*
* ulist_add returns 0 if @val already exists in ulist and 1 if @val has been
* inserted.
* In case of allocation failure -ENOMEM is returned and the ulist stays
* unaltered.
*/
int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
{
	/* Plain add: the caller has no use for a previously stored aux. */
	return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arne Jansen | 16 | 48.48% | 1 | 25.00% |
Jan Schmidt | 15 | 45.45% | 1 | 25.00% |
Alexander Block | 1 | 3.03% | 1 | 25.00% |
Daniel J Blueman | 1 | 3.03% | 1 | 25.00% |
Total | 33 | 100.00% | 4 | 100.00% |
int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
		    u64 *old_aux, gfp_t gfp_mask)
{
	struct ulist_node *node;
	int err;

	/* Already present: optionally report the stored aux, add nothing. */
	node = ulist_rbtree_search(ulist, val);
	if (node) {
		if (old_aux)
			*old_aux = node->aux;
		return 0;
	}

	node = kmalloc(sizeof(*node), gfp_mask);
	if (!node)
		return -ENOMEM;

	node->val = val;
	node->aux = aux;

	/* The search above guarantees the insertion cannot collide. */
	err = ulist_rbtree_insert(ulist, node);
	ASSERT(!err);

	list_add_tail(&node->list, &ulist->nodes);
	ulist->nnodes++;

	return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arne Jansen | 44 | 33.85% | 1 | 14.29% |
Shilong Wang | 44 | 33.85% | 2 | 28.57% |
Jan Schmidt | 30 | 23.08% | 1 | 14.29% |
Liu Bo | 9 | 6.92% | 1 | 14.29% |
Alexander Block | 2 | 1.54% | 1 | 14.29% |
Chris Mason | 1 | 0.77% | 1 | 14.29% |
Total | 130 | 100.00% | 7 | 100.00% |
/**
* ulist_del - delete one node from ulist
* @ulist: ulist to remove node from
* @val: value to delete
* @aux: aux to delete
*
* The deletion will only be done when *BOTH* val and aux matches.
* Return 0 for successful delete.
* Return > 0 for not found.
*/
int ulist_del(struct ulist *ulist, u64 val, u64 aux)
{
	struct ulist_node *node;

	node = ulist_rbtree_search(ulist, val);
	/* Only erase when both the value and its aux match. */
	if (!node || node->aux != aux)
		return 1;

	ulist_rbtree_erase(ulist, node);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Qu Wenruo | 61 | 100.00% | 1 | 100.00% |
Total | 61 | 100.00% | 1 | 100.00% |
/**
* ulist_next - iterate ulist
* @ulist: ulist to iterate
* @uiter: iterator variable, initialized with ULIST_ITER_INIT(&iterator)
*
* Note: locking must be provided by the caller. In case of rwlocks only read
* locking is needed
*
* This function is used to iterate an ulist.
* It returns the next element from the ulist or %NULL when the
* end is reached. No guarantee is made with respect to the order in which
* the elements are returned. They might neither be returned in order of
* addition nor in ascending order.
* It is allowed to call ulist_add during an enumeration. Newly added items
* are guaranteed to show up in the running enumeration.
*/
struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
{
	struct list_head *next;

	if (list_empty(&ulist->nodes))
		return NULL;

	if (!uiter->cur_list) {
		/* First call: start at the head of the list. */
		next = ulist->nodes.next;
	} else {
		/* Stop once the current element is the last one. */
		if (uiter->cur_list->next == &ulist->nodes)
			return NULL;
		next = uiter->cur_list->next;
	}

	uiter->cur_list = next;
	return list_entry(next, struct ulist_node, list);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Shilong Wang | 62 | 60.19% | 1 | 33.33% |
Arne Jansen | 36 | 34.95% | 1 | 33.33% |
Jan Schmidt | 5 | 4.85% | 1 | 33.33% |
Total | 103 | 100.00% | 3 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Shilong Wang | 383 | 47.88% | 2 | 16.67% |
Arne Jansen | 227 | 28.38% | 1 | 8.33% |
Qu Wenruo | 118 | 14.75% | 1 | 8.33% |
Jan Schmidt | 51 | 6.38% | 2 | 16.67% |
Liu Bo | 9 | 1.12% | 1 | 8.33% |
David Sterba | 5 | 0.62% | 1 | 8.33% |
Alexander Block | 3 | 0.38% | 1 | 8.33% |
Daniel J Blueman | 2 | 0.25% | 1 | 8.33% |
Nicholas D Steeves | 1 | 0.12% | 1 | 8.33% |
Chris Mason | 1 | 0.12% | 1 | 8.33% |
Total | 800 | 100.00% | 12 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.