Release 4.16 lib/radix-tree.c
/*
* Copyright (C) 2001 Momchil Velikov
* Portions Copyright (C) 2001 Christoph Hellwig
* Copyright (C) 2005 SGI, Christoph Lameter
* Copyright (C) 2006 Nick Piggin
* Copyright (C) 2012 Konstantin Khlebnikov
* Copyright (C) 2016 Intel, Matthew Wilcox
* Copyright (C) 2016 Intel, Ross Zwisler
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>
#include <linux/preempt.h> /* in_interrupt() */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>
/* Number of nodes in fully populated tree of given height */
static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
/*
* Radix tree node cache.
*/
static struct kmem_cache *radix_tree_node_cachep;
/*
* The radix tree is variable-height, so an insert operation not only has
* to build the branch to its corresponding item, it also has to build the
* branch to existing items if the size has to be increased (by
* radix_tree_extend).
*
* The worst case is a zero height tree with just a single item at index 0,
* and then inserting an item at index ULONG_MAX. This requires 2 new branches
* of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
* Hence:
*/
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
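As a worked example (assuming a 64-bit kernel with the usual RADIX_TREE_MAP_SHIFT of 6): RADIX_TREE_MAX_PATH = DIV_ROUND_UP(64, 6) = 11, so each CPU preloads at most RADIX_TREE_PRELOAD_SIZE = 11 * 2 - 1 = 21 nodes.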
/*
* The IDR does not have to be as high as the radix tree since it uses
* signed integers, not unsigned longs.
*/
#define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1)
#define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \
RADIX_TREE_MAP_SHIFT))
#define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1)
/*
* The IDA is even shorter since it uses a bitmap at the last level.
*/
#define IDA_INDEX_BITS (8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
#define IDA_MAX_PATH (DIV_ROUND_UP(IDA_INDEX_BITS, \
RADIX_TREE_MAP_SHIFT))
#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
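Under the same assumptions (and taking IDA_BITMAP_BITS as 1024, its usual 64-bit value): IDR_INDEX_BITS = 31 gives IDR_MAX_PATH = DIV_ROUND_UP(31, 6) = 6 and IDR_PRELOAD_SIZE = 11, while IDA_INDEX_BITS = 31 - ilog2(1024) = 21 gives IDA_MAX_PATH = 4 and IDA_PRELOAD_SIZE = 7.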
/*
* Per-cpu pool of preloaded nodes
*/
struct radix_tree_preload {
unsigned nr;
/* nodes->parent points to next preallocated node */
struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
static inline struct radix_tree_node *entry_to_node(void *ptr)
{
return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
static inline void *node_to_entry(void *ptr)
{
return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 25 | 92.59% | 1 | 33.33% |
Matthew Wilcox | 2 | 7.41% | 2 | 66.67% |
Total | 27 | 100.00% | 3 | 100.00% |
#define RADIX_TREE_RETRY node_to_entry(NULL)
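A minimal sketch of the pointer tagging these helpers implement (tag_round_trip is a hypothetical function for illustration; the trick works because radix_tree_node is slab-allocated and therefore at least word-aligned, leaving the RADIX_TREE_INTERNAL_NODE bit free):
	/* Sketch: round-trip a node pointer through the internal-entry tag. */
	static void tag_round_trip(struct radix_tree_node *node)
	{
		void *entry = node_to_entry(node);	/* sets the tag bit */
		BUG_ON(!radix_tree_is_internal_node(entry));
		BUG_ON(entry_to_node(entry) != node);	/* masking the bit restores the pointer */
	}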
#ifdef CONFIG_RADIX_TREE_MULTIORDER
/* Sibling slots point directly to another slot in the same node */
static inline
bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
{
void __rcu **ptr = node;
return (parent->slots <= ptr) &&
(ptr < parent->slots + RADIX_TREE_MAP_SIZE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 44 | 100.00% | 3 | 100.00% |
Total | 44 | 100.00% | 3 | 100.00% |
#else
static inline
bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
{
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 20 | 100.00% | 2 | 100.00% |
Total | 20 | 100.00% | 2 | 100.00% |
#endif
static inline unsigned long
get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
{
return slot - parent->slots;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 27 | 100.00% | 3 | 100.00% |
Total | 27 | 100.00% | 3 | 100.00% |
static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
struct radix_tree_node **nodep, unsigned long index)
{
unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);
#ifdef CONFIG_RADIX_TREE_MULTIORDER
if (radix_tree_is_internal_node(entry)) {
if (is_sibling_entry(parent, entry)) {
void __rcu **sibentry;
sibentry = (void __rcu **) entry_to_node(entry);
offset = get_slot_offset(parent, sibentry);
entry = rcu_dereference_raw(*sibentry);
}
}
#endif
*nodep = (void *)entry;
return offset;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 102 | 80.95% | 5 | 83.33% |
Linus Torvalds | 24 | 19.05% | 1 | 16.67% |
Total | 126 | 100.00% | 6 | 100.00% |
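To make the sibling-entry case concrete, a sketch (assuming CONFIG_RADIX_TREE_MULTIORDER and RADIX_TREE_MAP_SHIFT = 6; slots stands in for a shift-0 node's slot array holding an order-2 entry item at index 0):
	slots[0] = item;			/* the canonical slot */
	slots[1] = node_to_entry(&slots[0]);	/* sibling entries are tagged pointers */
	slots[2] = node_to_entry(&slots[0]);	/* back to the canonical slot */
	slots[3] = node_to_entry(&slots[0]);
A lookup of index 2 first lands on offset 2; radix_tree_descend() recognises the sibling entry and rewrites both the offset (to 0) and the returned entry (to item).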
static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
{
return root->gfp_mask & __GFP_BITS_MASK;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 19 | 95.00% | 1 | 50.00% |
Matthew Wilcox | 1 | 5.00% | 1 | 50.00% |
Total | 20 | 100.00% | 2 | 100.00% |
static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
int offset)
{
__set_bit(offset, node->tags[tag]);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 25 | 80.65% | 2 | 50.00% |
Andrew Morton | 6 | 19.35% | 2 | 50.00% |
Total | 31 | 100.00% | 4 | 100.00% |
static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
int offset)
{
__clear_bit(offset, node->tags[tag]);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 24 | 77.42% | 1 | 50.00% |
Andrew Morton | 7 | 22.58% | 1 | 50.00% |
Total | 31 | 100.00% | 2 | 100.00% |
static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
int offset)
{
return test_bit(offset, node->tags[tag]);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 28 | 84.85% | 1 | 33.33% |
Andrew Morton | 4 | 12.12% | 1 | 33.33% |
Matthew Wilcox | 1 | 3.03% | 1 | 33.33% |
Total | 33 | 100.00% | 3 | 100.00% |
static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
{
root->gfp_mask |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 31 | 93.94% | 3 | 60.00% |
Matthew Wilcox | 1 | 3.03% | 1 | 20.00% |
Andrew Morton | 1 | 3.03% | 1 | 20.00% |
Total | 33 | 100.00% | 5 | 100.00% |
static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
root->gfp_mask &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 33 | 97.06% | 2 | 66.67% |
Matthew Wilcox | 1 | 2.94% | 1 | 33.33% |
Total | 34 | 100.00% | 3 | 100.00% |
static inline void root_tag_clear_all(struct radix_tree_root *root)
{
root->gfp_mask &= (1 << ROOT_TAG_SHIFT) - 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 10 | 41.67% | 3 | 50.00% |
Nicholas Piggin | 7 | 29.17% | 2 | 33.33% |
Matthew Wilcox | 7 | 29.17% | 1 | 16.67% |
Total | 24 | 100.00% | 6 | 100.00% |
static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
{
return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 32 | 91.43% | 1 | 25.00% |
Matthew Wilcox | 3 | 8.57% | 3 | 75.00% |
Total | 35 | 100.00% | 4 | 100.00% |
static inline unsigned root_tags_get(const struct radix_tree_root *root)
{
return (__force unsigned)root->gfp_mask >> ROOT_TAG_SHIFT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 24 | 100.00% | 3 | 100.00% |
Total | 24 | 100.00% | 3 | 100.00% |
static inline bool is_idr(const struct radix_tree_root *root)
{
return !!(root->gfp_mask & ROOT_IS_IDR);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 24 | 100.00% | 2 | 100.00% |
Total | 24 | 100.00% | 2 | 100.00% |
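A sketch of the ->gfp_mask layout these helpers rely on (myroot is a hypothetical tree; the allocation flags live in the low __GFP_BITS_MASK bits, the per-tree tags in the bits from ROOT_TAG_SHIFT upward, and ROOT_IS_IDR marks IDR trees):
	RADIX_TREE(myroot, GFP_KERNEL);
	root_tag_set(&myroot, 0);	/* e.g. PAGECACHE_TAG_DIRTY */
	BUG_ON(!root_tag_get(&myroot, 0));
	BUG_ON(root_gfp_mask(&myroot) != GFP_KERNEL);	/* tag bits are masked off */
	root_tag_clear_all(&myroot);	/* keeps everything below ROOT_TAG_SHIFT */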
/*
* Returns 1 if any slot in the node has this tag set.
* Otherwise returns 0.
*/
static inline int any_tag_set(const struct radix_tree_node *node,
unsigned int tag)
{
unsigned idx;
for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
if (node->tags[tag][idx])
return 1;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 39 | 73.58% | 1 | 25.00% |
Andrew Morton | 12 | 22.64% | 1 | 25.00% |
Matthew Wilcox | 2 | 3.77% | 2 | 50.00% |
Total | 53 | 100.00% | 4 | 100.00% |
static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
{
bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
/**
* radix_tree_find_next_bit - find the next set bit in a memory region
*
* @node: where to begin the search
* @tag: the tag index
* @offset: the bitnumber to start searching at
*
* Unrollable variant of find_next_bit() for constant size arrays.
* Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
* Returns the next set bit offset, or RADIX_TREE_MAP_SIZE if none is found.
*/
static __always_inline unsigned long
radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
unsigned long offset)
{
const unsigned long *addr = node->tags[tag];
if (offset < RADIX_TREE_MAP_SIZE) {
unsigned long tmp;
addr += offset / BITS_PER_LONG;
tmp = *addr >> (offset % BITS_PER_LONG);
if (tmp)
return __ffs(tmp) + offset;
offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
while (offset < RADIX_TREE_MAP_SIZE) {
tmp = *++addr;
if (tmp)
return __ffs(tmp) + offset;
offset += BITS_PER_LONG;
}
}
return RADIX_TREE_MAP_SIZE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Konstantin Khlebnikov | 103 | 83.74% | 1 | 50.00% |
Matthew Wilcox | 20 | 16.26% | 1 | 50.00% |
Total | 123 | 100.00% | 2 | 100.00% |
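For instance, if node->tags[tag][0] == 0x9 (bits 0 and 3 set), then radix_tree_find_next_bit(node, tag, 1) computes tmp = 0x9 >> 1 = 0x4 and returns __ffs(0x4) + 1 = 3; were no bit set at or after the offset, it would return RADIX_TREE_MAP_SIZE.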
static unsigned int iter_offset(const struct radix_tree_iter *iter)
{
return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 27 | 100.00% | 1 | 100.00% |
Total | 27 | 100.00% | 1 | 100.00% |
/*
* The maximum index which can be stored in a radix tree
*/
static inline unsigned long shift_maxindex(unsigned int shift)
{
return (RADIX_TREE_MAP_SIZE << shift) - 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 21 | 100.00% | 1 | 100.00% |
Total | 21 | 100.00% | 1 | 100.00% |
static inline unsigned long node_maxindex(const struct radix_tree_node *node)
{
return shift_maxindex(node->shift);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 22 | 100.00% | 2 | 100.00% |
Total | 22 | 100.00% | 2 | 100.00% |
static unsigned long next_index(unsigned long index,
const struct radix_tree_node *node,
unsigned long offset)
{
return (index & ~node_maxindex(node)) + (offset << node->shift);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 40 | 100.00% | 1 | 100.00% |
Total | 40 | 100.00% | 1 | 100.00% |
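Concretely (RADIX_TREE_MAP_SHIFT = 6 again, so RADIX_TREE_MAP_SIZE = 64): shift_maxindex(0) = 63, shift_maxindex(6) = 4095 and shift_maxindex(12) = 262143; each additional level multiplies the index space by RADIX_TREE_MAP_SIZE.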
#ifndef __KERNEL__
static void dump_node(struct radix_tree_node *node, unsigned long index)
{
unsigned long i;
pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n",
node, node->offset, index, index | node_maxindex(node),
node->parent,
node->tags[0][0], node->tags[1][0], node->tags[2][0],
node->shift, node->count, node->exceptional);
for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
unsigned long first = index | (i << node->shift);
unsigned long last = first | ((1UL << node->shift) - 1);
void *entry = node->slots[i];
if (!entry)
continue;
if (entry == RADIX_TREE_RETRY) {
pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n",
i, first, last, node);
} else if (!radix_tree_is_internal_node(entry)) {
pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n",
entry, i, first, last, node);
} else if (is_sibling_entry(node, entry)) {
pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n",
entry, i, first, last, node,
*(void **)entry_to_node(entry));
} else {
dump_node(entry_to_node(entry), first);
}
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ross Zwisler | 180 | 72.87% | 1 | 12.50% |
Matthew Wilcox | 66 | 26.72% | 6 | 75.00% |
Johannes Weiner | 1 | 0.40% | 1 | 12.50% |
Total | 247 | 100.00% | 8 | 100.00% |
/* For debug */
static void radix_tree_dump(struct radix_tree_root *root)
{
pr_debug("radix root: %p rnode %p tags %x\n",
root, root->rnode,
root->gfp_mask >> ROOT_TAG_SHIFT);
if (!radix_tree_is_internal_node(root->rnode))
return;
dump_node(entry_to_node(root->rnode), 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ross Zwisler | 47 | 92.16% | 1 | 20.00% |
Matthew Wilcox | 4 | 7.84% | 4 | 80.00% |
Total | 51 | 100.00% | 5 | 100.00% |
static void dump_ida_node(void *entry, unsigned long index)
{
unsigned long i;
if (!entry)
return;
if (radix_tree_is_internal_node(entry)) {
struct radix_tree_node *node = entry_to_node(entry);
pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n",
node, node->offset, index * IDA_BITMAP_BITS,
((index | node_maxindex(node)) + 1) *
IDA_BITMAP_BITS - 1,
node->parent, node->tags[0][0], node->shift,
node->count);
for (i = 0; i < RADIX_TREE_MAP_SIZE; i++)
dump_ida_node(node->slots[i],
index | (i << node->shift));
} else if (radix_tree_exceptional_entry(entry)) {
pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n",
entry, (int)(index & RADIX_TREE_MAP_MASK),
index * IDA_BITMAP_BITS,
index * IDA_BITMAP_BITS + BITS_PER_LONG -
RADIX_TREE_EXCEPTIONAL_SHIFT,
(unsigned long)entry >>
RADIX_TREE_EXCEPTIONAL_SHIFT);
} else {
struct ida_bitmap *bitmap = entry;
pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap,
(int)(index & RADIX_TREE_MAP_MASK),
index * IDA_BITMAP_BITS,
(index + 1) * IDA_BITMAP_BITS - 1);
for (i = 0; i < IDA_BITMAP_LONGS; i++)
pr_cont(" %lx", bitmap->bitmap[i]);
pr_cont("\n");
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 196 | 79.67% | 5 | 33.33% |
Nicholas Piggin | 22 | 8.94% | 2 | 13.33% |
Andrew Morton | 18 | 7.32% | 3 | 20.00% |
Catalin Marinas | 3 | 1.22% | 1 | 6.67% |
Vladimir Davydov | 3 | 1.22% | 1 | 6.67% |
Mel Gorman | 2 | 0.81% | 1 | 6.67% |
Kirill A. Shutemov | 1 | 0.41% | 1 | 6.67% |
Al Viro | 1 | 0.41% | 1 | 6.67% |
Total | 246 | 100.00% | 15 | 100.00% |
static void ida_dump(struct ida *ida)
{
struct radix_tree_root *root = &ida->ida_rt;
pr_debug("ida: %p node %p free %d\n", ida, root->rnode,
root->gfp_mask >> ROOT_TAG_SHIFT);
dump_ida_node(root->rnode, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 34 | 72.34% | 2 | 50.00% |
Nicholas Piggin | 13 | 27.66% | 2 | 50.00% |
Total | 47 | 100.00% | 4 | 100.00% |
#endif
/*
* This assumes that the caller has performed appropriate preallocation, and
* that the caller has pinned this thread of control to the current CPU.
*/
static struct radix_tree_node *
radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
struct radix_tree_root *root,
unsigned int shift, unsigned int offset,
unsigned int count, unsigned int exceptional)
{
struct radix_tree_node *ret = NULL;
/*
* Preload code isn't irq safe and it doesn't make sense to use
* preloading during an interrupt anyway as all the allocations have
* to be atomic. So just do normal allocation when in interrupt.
*/
if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
struct radix_tree_preload *rtp;
/*
* Even if the caller has preloaded, try to allocate from the
* cache first for the new node to get accounted to the memory
* cgroup.
*/
ret = kmem_cache_alloc(radix_tree_node_cachep,
gfp_mask | __GFP_NOWARN);
if (ret)
goto out;
/*
* Provided the caller has preloaded here, we will always
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr) {
ret = rtp->nodes;
rtp->nodes = ret->parent;
rtp->nr--;
}
/*
* Update the allocation stack trace as this is more useful
* for debugging.
*/
kmemleak_update_trace(ret);
goto out;
}
ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
BUG_ON(radix_tree_is_internal_node(ret));
if (ret) {
ret->shift = shift;
ret->offset = offset;
ret->count = count;
ret->exceptional = exceptional;
ret->parent = parent;
ret->root = root;
}
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 93 | 48.44% | 6 | 35.29% |
Nicholas Piggin | 64 | 33.33% | 2 | 11.76% |
Kirill A. Shutemov | 10 | 5.21% | 2 | 11.76% |
Andrew Morton | 8 | 4.17% | 2 | 11.76% |
Jan Kara | 6 | 3.12% | 1 | 5.88% |
Vladimir Davydov | 4 | 2.08% | 1 | 5.88% |
Catalin Marinas | 3 | 1.56% | 1 | 5.88% |
Christoph Lameter | 2 | 1.04% | 1 | 5.88% |
Mel Gorman | 2 | 1.04% | 1 | 5.88% |
Total | 192 | 100.00% | 17 | 100.00% |
static void radix_tree_node_rcu_free(struct rcu_head *head)
{
struct radix_tree_node *node =
container_of(head, struct radix_tree_node, rcu_head);
/*
* Must only free zeroed nodes into the slab. We can be left with
* non-NULL entries by radix_tree_free_nodes, so clear the entries
* and tags here.
*/
memset(node->slots, 0, sizeof(node->slots));
memset(node->tags, 0, sizeof(node->tags));
INIT_LIST_HEAD(&node->private_list);
kmem_cache_free(radix_tree_node_cachep, node);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 31 | 41.89% | 2 | 25.00% |
Nicholas Piggin | 21 | 28.38% | 2 | 25.00% |
Andrew Morton | 11 | 14.86% | 1 | 12.50% |
Jan Kara | 8 | 10.81% | 1 | 12.50% |
Mel Gorman | 2 | 2.70% | 1 | 12.50% |
Kirill A. Shutemov | 1 | 1.35% | 1 | 12.50% |
Total | 74 | 100.00% | 8 | 100.00% |
static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 19 | 86.36% | 2 | 66.67% |
Jan Kara | 3 | 13.64% | 1 | 33.33% |
Total | 22 | 100.00% | 3 | 100.00% |
/*
* Load up this CPU's radix_tree_node buffer with sufficient objects to
* ensure that the addition of a single element in the tree cannot fail. On
* success, return zero, with preemption disabled. On error, return -ENOMEM
* with preemption not disabled.
*
* To make use of this facility, the radix tree must be initialised without
* __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
*/
static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
{
struct radix_tree_preload *rtp;
struct radix_tree_node *node;
int ret = -ENOMEM;
/*
* Nodes preloaded by one cgroup can be used by another cgroup, so
* they should never be accounted to any particular memory cgroup.
*/
gfp_mask &= ~__GFP_ACCOUNT;
preempt_disable();
rtp = this_cpu_ptr(&radix_tree_preloads);
while (rtp->nr < nr) {
preempt_enable();
node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
if (node == NULL)
goto out;
preempt_disable();
rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr < nr) {
node->parent = rtp->nodes;
rtp->nodes = node;
rtp->nr++;
} else {
kmem_cache_free(radix_tree_node_cachep, node);
}
}
ret = 0;
out:
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 86 | 63.24% | 2 | 20.00% |
Matthew Wilcox | 25 | 18.38% | 2 | 20.00% |
Kirill A. Shutemov | 14 | 10.29% | 2 | 20.00% |
Vladimir Davydov | 4 | 2.94% | 1 | 10.00% |
Christoph Lameter | 4 | 2.94% | 1 | 10.00% |
Jan Kara | 2 | 1.47% | 1 | 10.00% |
Eric Dumazet | 1 | 0.74% | 1 | 10.00% |
Total | 136 | 100.00% | 10 | 100.00% |
/*
* Load up this CPU's radix_tree_node buffer with sufficient objects to
* ensure that the addition of a single element in the tree cannot fail. On
* success, return zero, with preemption disabled. On error, return -ENOMEM
* with preemption not disabled.
*
* To make use of this facility, the radix tree must be initialised without
* __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
*/
int radix_tree_preload(gfp_t gfp_mask)
{
/* Warn on non-sensical use... */
WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill A. Shutemov | 21 | 80.77% | 1 | 50.00% |
Jan Kara | 5 | 19.23% | 1 | 50.00% |
Total | 26 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(radix_tree_preload);
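The calling pattern this implies, as a sketch (mytree, mylock, index and item are placeholders; err is the usual int return):
	if (radix_tree_preload(GFP_KERNEL))
		return -ENOMEM;
	spin_lock(&mylock);
	err = radix_tree_insert(&mytree, index, item);	/* cannot fail with -ENOMEM now */
	spin_unlock(&mylock);
	radix_tree_preload_end();	/* re-enables preemption */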
/*
* The same as the function above, except we don't guarantee preloading
* happens; we preload only if we decide it helps. On success, return zero
* with preemption disabled. On error, return -ENOMEM with preemption not disabled.
*/
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
if (gfpflags_allow_blocking(gfp_mask))
return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
/* Preloading doesn't help anything with this gfp mask, skip it */
preempt_disable();
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 16 | 53.33% | 1 | 25.00% |
Jan Kara | 11 | 36.67% | 1 | 25.00% |
Kirill A. Shutemov | 2 | 6.67% | 1 | 25.00% |
Mel Gorman | 1 | 3.33% | 1 | 25.00% |
Total | 30 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(radix_tree_maybe_preload);
#ifdef CONFIG_RADIX_TREE_MULTIORDER
/*
* Preload with enough objects to ensure that we can split a single entry
* of order @old_order into many entries of size @new_order
*/
int radix_tree_split_preload(unsigned int old_order, unsigned int new_order,
gfp_t gfp_mask)
{
unsigned top = 1 << (old_order % RADIX_TREE_MAP_SHIFT);
unsigned layers = (old_order / RADIX_TREE_MAP_SHIFT) -
(new_order / RADIX_TREE_MAP_SHIFT);
unsigned nr = 0;
WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
BUG_ON(new_order >= old_order);
while (layers--)
nr = nr * RADIX_TREE_MAP_SIZE + 1;
return __radix_tree_preload(gfp_mask, top * nr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 72 | 83.72% | 3 | 50.00% |
Andrew Morton | 13 | 15.12% | 2 | 33.33% |
Hugh Dickins | 1 | 1.16% | 1 | 16.67% |
Total | 86 | 100.00% | 6 | 100.00% |
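Worked numbers (RADIX_TREE_MAP_SHIFT = 6): splitting an old_order = 9 entry into new_order = 0 entries gives top = 1 << (9 % 6) = 8 and layers = 1, so nr = 1 and 8 nodes are preloaded, matching the 8 shift-0 nodes needed to replace the 8 slots the order-9 entry occupies at the shift-6 level.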
#endif
/*
* The same as the function above, but preload the number of nodes required
* to insert (1 << order) contiguous, naturally aligned elements.
*/
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
{
unsigned long nr_subtrees;
int nr_nodes, subtree_height;
/* Preloading doesn't help anything with this gfp mask, skip it */
if (!gfpflags_allow_blocking(gfp_mask)) {
preempt_disable();
return 0;
}
/*
* Calculate number and height of fully populated subtrees it takes to
* store (1 << order) elements.
*/
nr_subtrees = 1 << order;
for (subtree_height = 0; nr_subtrees > RADIX_TREE_MAP_SIZE;
subtree_height++)
nr_subtrees >>= RADIX_TREE_MAP_SHIFT;
/*
* The worst case is zero height tree with a single item at index 0 and
* then inserting items starting at ULONG_MAX - (1 << order).
*
* This requires RADIX_TREE_MAX_PATH nodes to build branch from root to
* 0-index item.
*/
nr_nodes = RADIX_TREE_MAX_PATH;
/* Plus branch to fully populated subtrees. */
nr_nodes += RADIX_TREE_MAX_PATH - subtree_height;
/* Root node is shared. */
nr_nodes--;
/* Plus nodes required to build subtrees. */
nr_nodes += nr_subtrees * height_to_maxnodes[subtree_height];
return __radix_tree_preload(gfp_mask, nr_nodes);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill A. Shutemov | 76 | 80.00% | 1 | 16.67% |
Andrew Morton | 12 | 12.63% | 2 | 33.33% |
Matthew Wilcox | 6 | 6.32% | 2 | 33.33% |
Johannes Weiner | 1 | 1.05% | 1 | 16.67% |
Total | 95 | 100.00% | 6 | 100.00% |
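A worked example (64-bit, RADIX_TREE_MAP_SHIFT = 6): for order = 9, nr_subtrees starts at 512, one pass of the loop leaves subtree_height = 1 and nr_subtrees = 8, and a fully populated height-1 subtree is a single node, so nr_nodes = 11 + (11 - 1) - 1 + 8 * 1 = 28.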
static unsigned radix_tree_load_root(const struct radix_tree_root *root,
struct radix_tree_node **nodep, unsigned long *maxindex)
{
struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
*nodep = node;
if (likely(radix_tree_is_internal_node(node))) {
node = entry_to_node(node);
*maxindex = node_maxindex(node);
return node->shift + RADIX_TREE_MAP_SHIFT;
}
*maxindex = 0;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 57 | 69.51% | 7 | 63.64% |
Johannes Weiner | 8 | 9.76% | 1 | 9.09% |
Hugh Dickins | 7 | 8.54% | 1 | 9.09% |
Nicholas Piggin | 5 | 6.10% | 1 | 9.09% |
Andrew Morton | 5 | 6.10% | 1 | 9.09% |
Total | 82 | 100.00% | 11 | 100.00% |
/*
* Extend a radix tree so it can store key @index.
*/
static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
unsigned long index, unsigned int shift)
{
void *entry;
unsigned int maxshift;
int tag;
/* Figure out what the shift should be. */
maxshift = shift;
while (index > shift_maxindex(maxshift))
maxshift += RADIX_TREE_MAP_SHIFT;
entry = rcu_dereference_raw(root->rnode);
if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
goto out;
do {
struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
root, shift, 0, 1, 0);
if (!node)
return -ENOMEM;
if (is_idr(root)) {
all_tag_set(node, IDR_FREE);
if (!root_tag_get(root, IDR_FREE)) {
tag_clear(node, IDR_FREE, 0);
root_tag_set(root, IDR_FREE);
}
} else {
/* Propagate the aggregated tag info to the new child */
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
if (root_tag_get(root, tag))
tag_set(node, tag, 0);
}
}
BUG_ON(shift > BITS_PER_LONG);
if (radix_tree_is_internal_node(entry)) {
entry_to_node(entry)->parent = node;
} else if (radix_tree_exceptional_entry(entry)) {
/* Moving an exceptional root->rnode to a node */
node->exceptional = 1;
}
/*
* entry was already in the radix tree, so we do not need
* rcu_assign_pointer here
*/
node->slots[0] = (void __rcu *)entry;
entry = node_to_entry(node);
rcu_assign_pointer(root->rnode, entry);
shift += RADIX_TREE_MAP_SHIFT;
} while (shift <= maxshift);
out:
return maxshift + RADIX_TREE_MAP_SHIFT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 138 | 47.75% | 16 | 55.17% |
Andrew Morton | 74 | 25.61% | 2 | 6.90% |
Johannes Weiner | 59 | 20.42% | 6 | 20.69% |
Hugh Dickins | 9 | 3.11% | 1 | 3.45% |
Nicholas Piggin | 8 | 2.77% | 3 | 10.34% |
Jonathan Corbet | 1 | 0.35% | 1 | 3.45% |
Total | 289 | 100.00% | 29 | 100.00% |
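For example, extending a tree whose root is a single shift-0 node (maxindex 63) to cover index 4095: maxshift settles at 6, the loop runs exactly once, and the newly allocated shift-6 node adopts the old root in slot 0 before being published with rcu_assign_pointer(), raising the tree's maxindex to 4095.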
/**
* radix_tree_shrink - shrink radix tree to minimum height
* @root: radix tree root
* @update_node: callback for changing leaf nodes
*/
static inline bool radix_tree_shrink(struct radix_tree_root *root,
radix_tree_update_node_t update_node)
{
bool shrunk = false;
for (;;) {
struct radix_tree_node *node = rcu_dereference_raw(