Release 4.11 mm/mempool.c
/*
* linux/mm/mempool.c
*
* Memory buffer pool support. Such pools are mostly used
* for guaranteed, deadlock-free memory allocations during
* extreme VM load.
*
* started by Ingo Molnar, Copyright (C) 2001
* debugging by David Rientjes, Copyright (C) 2015
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
size_t byte)
{
const int nr = pool->curr_nr;
const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
int i;
pr_err("BUG: mempool element poison mismatch\n");
pr_err("Mempool %p size %zu\n", pool, size);
pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
for (i = start; i < end; i++)
pr_cont("%x ", *(u8 *)(element + i));
pr_cont("%s\n", end < size ? "..." : "");
dump_stack();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Rientjes | 145 | 100.00% | 1 | 100.00% |
Total | 145 | 100.00% | 1 | 100.00% |
static void __check_element(mempool_t *pool, void *element, size_t size)
{
u8 *obj = element;
size_t i;
for (i = 0; i < size; i++) {
u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;
if (obj[i] != exp) {
poison_error(pool, element, size, i);
return;
}
}
memset(obj, POISON_INUSE, size);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Rientjes | 88 | 100.00% | 1 | 100.00% |
Total | 88 | 100.00% | 1 | 100.00% |
static void check_element(mempool_t *pool, void *element)
{
/* Mempools backed by slab allocator */
if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
__check_element(pool, element, ksize(element));
/* Mempools backed by page allocator */
if (pool->free == mempool_free_pages) {
int order = (int)(long)pool->pool_data;
void *addr = kmap_atomic((struct page *)element);
__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
kunmap_atomic(addr);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Rientjes | 99 | 100.00% | 1 | 100.00% |
Total | 99 | 100.00% | 1 | 100.00% |
static void __poison_element(void *element, size_t size)
{
u8 *obj = element;
memset(obj, POISON_FREE, size - 1);
obj[size - 1] = POISON_END;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Rientjes | 39 | 100.00% | 1 | 100.00% |
Total | 39 | 100.00% | 1 | 100.00% |
static void poison_element(mempool_t *pool, void *element)
{
/* Mempools backed by slab allocator */
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
__poison_element(element, ksize(element));
/* Mempools backed by page allocator */
if (pool->alloc == mempool_alloc_pages) {
int order = (int)(long)pool->pool_data;
void *addr = kmap_atomic((struct page *)element);
__poison_element(addr, 1UL << (PAGE_SHIFT + order));
kunmap_atomic(addr);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Rientjes | 95 | 100.00% | 1 | 100.00% |
Total | 95 | 100.00% | 1 | 100.00% |
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Rientjes | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
static inline void poison_element(mempool_t *pool, void *element)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Rientjes | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
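The scheme above is byte-level poisoning: while an element sits in the pool every byte holds POISON_FREE except the last, which holds POISON_END, and the pattern is overwritten with POISON_INUSE once the element is handed out. Below is a minimal userspace-style sketch of the same idea; the sketch_* names and the standalone main() are illustrative and not part of mempool.c, though the byte values mirror include/linux/poison.h.

#include <stddef.h>
#include <string.h>

#define SKETCH_POISON_FREE  0x6b	/* element byte while parked in the pool */
#define SKETCH_POISON_END   0xa5	/* marks the last byte of the element */
#define SKETCH_POISON_INUSE 0x5a	/* written once the element is handed out */

static void sketch_poison(unsigned char *obj, size_t size)
{
	memset(obj, SKETCH_POISON_FREE, size - 1);
	obj[size - 1] = SKETCH_POISON_END;
}

static int sketch_check(unsigned char *obj, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++) {
		unsigned char exp = (i < size - 1) ? SKETCH_POISON_FREE
						   : SKETCH_POISON_END;
		if (obj[i] != exp)
			return -1;	/* someone scribbled on a parked element */
	}
	memset(obj, SKETCH_POISON_INUSE, size);
	return 0;
}

int main(void)
{
	unsigned char elem[8];

	sketch_poison(elem, sizeof(elem));	/* 6b 6b 6b 6b 6b 6b 6b a5 */
	return sketch_check(elem, sizeof(elem)) ? 1 : 0;
}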
static void kasan_poison_element(mempool_t *pool, void *element)
{
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
kasan_poison_kfree(element);
if (pool->alloc == mempool_alloc_pages)
kasan_free_pages(element, (unsigned long)pool->pool_data);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrey Ryabinin | 54 | 100.00% | 2 | 100.00% |
Total | 54 | 100.00% | 2 | 100.00% |
static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
{
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
kasan_unpoison_slab(element);
if (pool->alloc == mempool_alloc_pages)
kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrey Ryabinin | 54 | 94.74% | 2 | 66.67% |
Alexander Potapenko | 3 | 5.26% | 1 | 33.33% |
Total | 57 | 100.00% | 3 | 100.00% |
static void add_element(mempool_t *pool, void *element)
{
BUG_ON(pool->curr_nr >= pool->min_nr);
poison_element(pool, element);
kasan_poison_element(pool, element);
pool->elements[pool->curr_nr++] = element;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 37 | 72.55% | 1 | 33.33% |
Andrey Ryabinin | 7 | 13.73% | 1 | 33.33% |
David Rientjes | 7 | 13.73% | 1 | 33.33% |
Total | 51 | 100.00% | 3 | 100.00% |
static void *remove_element(mempool_t *pool, gfp_t flags)
{
void *element = pool->elements[--pool->curr_nr];
BUG_ON(pool->curr_nr < 0);
kasan_unpoison_element(pool, element, flags);
check_element(pool, element);
return element;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Rientjes | 22 | 39.29% | 1 | 20.00% |
Andrew Morton | 21 | 37.50% | 1 | 20.00% |
Andrey Ryabinin | 6 | 10.71% | 1 | 20.00% |
Alexander Potapenko | 5 | 8.93% | 1 | 20.00% |
Matthew Dawson | 2 | 3.57% | 1 | 20.00% |
Total | 56 | 100.00% | 5 | 100.00% |
/**
* mempool_destroy - deallocate a memory pool
* @pool: pointer to the memory pool which was allocated via
* mempool_create().
*
* Free all reserved elements in @pool and @pool itself. This function
* only sleeps if the free_fn() function sleeps.
*/
void mempool_destroy(mempool_t *pool)
{
if (unlikely(!pool))
return;
while (pool->curr_nr) {
void *element = remove_element(pool, GFP_KERNEL);
pool->free(element, pool->pool_data);
}
kfree(pool->elements);
kfree(pool);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 48 | 80.00% | 1 | 25.00% |
Sergey Senozhatsky | 9 | 15.00% | 1 | 25.00% |
Alexander Potapenko | 2 | 3.33% | 1 | 25.00% |
Tejun Heo | 1 | 1.67% | 1 | 25.00% |
Total | 60 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(mempool_destroy);
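A hedged teardown sketch (my_pool, my_cache and my_exit are illustrative names): because free_fn() for a slab-backed pool returns elements to the backing kmem_cache, the pool must be destroyed before the cache that feeds it.

#include <linux/mempool.h>
#include <linux/slab.h>

static mempool_t *my_pool;
static struct kmem_cache *my_cache;

static void my_exit(void)
{
	/* Drains the reserve through free_fn(), then frees the pool itself. */
	mempool_destroy(my_pool);
	/* Only safe once no pool element can still point into the cache. */
	kmem_cache_destroy(my_cache);
}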
/**
* mempool_create - create a memory pool
* @min_nr: the minimum number of elements guaranteed to be
* allocated for this pool.
* @alloc_fn: user-defined element-allocation function.
* @free_fn: user-defined element-freeing function.
* @pool_data: optional private data available to the user-defined functions.
*
* This function creates and preallocates a guaranteed-size memory pool.
* The pool can then be used from the mempool_alloc() and mempool_free()
* functions. This function might sleep. Both alloc_fn() and free_fn()
* may sleep, as long as mempool_alloc() is not called from IRQ context.
*/
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data)
{
return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
GFP_KERNEL, NUMA_NO_NODE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 20 | 54.05% | 1 | 33.33% |
Christoph Lameter | 14 | 37.84% | 1 | 33.33% |
Tejun Heo | 3 | 8.11% | 1 | 33.33% |
Total | 37 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(mempool_create);
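A hedged sketch of the generic interface (struct my_elem, my_alloc, my_free and the reserve size are illustrative): alloc_fn() receives the gfp mask plus the opaque pool_data pointer, and free_fn() must accept anything alloc_fn() returned, so any allocator pair with these signatures can back a pool.

#include <linux/mempool.h>
#include <linux/slab.h>

struct my_elem {
	char buf[64];
};

static void *my_alloc(gfp_t gfp_mask, void *pool_data)
{
	/* pool_data is whatever was handed to mempool_create(); unused here. */
	return kmalloc(sizeof(struct my_elem), gfp_mask);
}

static void my_free(void *element, void *pool_data)
{
	kfree(element);
}

static mempool_t *my_create_pool(void)
{
	/* Preallocate 8 guaranteed elements; may sleep. */
	return mempool_create(8, my_alloc, my_free, NULL);
}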
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data,
gfp_t gfp_mask, int node_id)
{
mempool_t *pool;
pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
if (!pool)
return NULL;
pool->elements = kmalloc_node(min_nr * sizeof(void *),
gfp_mask, node_id);
if (!pool->elements) {
kfree(pool);
return NULL;
}
spin_lock_init(&pool->lock);
pool->min_nr = min_nr;
pool->pool_data = pool_data;
init_waitqueue_head(&pool->wait);
pool->alloc = alloc_fn;
pool->free = free_fn;
/*
* First pre-allocate the guaranteed number of buffers.
*/
while (pool->curr_nr < pool->min_nr) {
void *element;
element = pool->alloc(gfp_mask, pool->pool_data);
if (unlikely(!element)) {
mempool_destroy(pool);
return NULL;
}
add_element(pool, element);
}
return pool;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 109 | 57.98% | 1 | 16.67% |
Andrew Morton | 43 | 22.87% | 1 | 16.67% |
Christoph Lameter | 28 | 14.89% | 1 | 16.67% |
Tejun Heo | 7 | 3.72% | 2 | 33.33% |
Joe Perches | 1 | 0.53% | 1 | 16.67% |
Total | 188 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(mempool_create_node);
/**
* mempool_resize - resize an existing memory pool
* @pool: pointer to the memory pool which was allocated via
* mempool_create().
* @new_min_nr: the new minimum number of elements guaranteed to be
* allocated for this pool.
*
* This function shrinks/grows the pool. In the case of growing,
* it cannot be guaranteed that the pool will be grown to the new
* size immediately, but new mempool_free() calls will refill it.
* This function may sleep.
*
* Note, the caller must guarantee that no mempool_destroy is called
* while this function is running. mempool_alloc() & mempool_free()
* might be called (e.g. from IRQ context) while this function executes.
*/
int mempool_resize(mempool_t *pool, int new_min_nr)
{
void *element;
void **new_elements;
unsigned long flags;
BUG_ON(new_min_nr <= 0);
might_sleep();
spin_lock_irqsave(&pool->lock, flags);
if (new_min_nr <= pool->min_nr) {
while (new_min_nr < pool->curr_nr) {
element = remove_element(pool, GFP_KERNEL);
spin_unlock_irqrestore(&pool->lock, flags);
pool->free(element, pool->pool_data);
spin_lock_irqsave(&pool->lock, flags);
}
pool->min_nr = new_min_nr;
goto out_unlock;
}
spin_unlock_irqrestore(&pool->lock, flags);
/* Grow the pool */
new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
GFP_KERNEL);
if (!new_elements)
return -ENOMEM;
spin_lock_irqsave(&pool->lock, flags);
if (unlikely(new_min_nr <= pool->min_nr)) {
/* Raced, other resize will do our work */
spin_unlock_irqrestore(&pool->lock, flags);
kfree(new_elements);
goto out;
}
memcpy(new_elements, pool->elements,
pool->curr_nr * sizeof(*new_elements));
kfree(pool->elements);
pool->elements = new_elements;
pool->min_nr = new_min_nr;
while (pool->curr_nr < pool->min_nr) {
spin_unlock_irqrestore(&pool->lock, flags);
element = pool->alloc(GFP_KERNEL, pool->pool_data);
if (!element)
goto out;
spin_lock_irqsave(&pool->lock, flags);
if (pool->curr_nr < pool->min_nr) {
add_element(pool, element);
} else {
spin_unlock_irqrestore(&pool->lock, flags);
pool->free(element, pool->pool_data); /* Raced */
goto out;
}
}
out_unlock:
spin_unlock_irqrestore(&pool->lock, flags);
out:
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alasdair G. Kergon | 297 | 85.84% | 1 | 20.00% |
Prasanna Meda | 38 | 10.98% | 1 | 20.00% |
David Rientjes | 7 | 2.02% | 1 | 20.00% |
Alexander Potapenko | 2 | 0.58% | 1 | 20.00% |
Andrew Morton | 2 | 0.58% | 1 | 20.00% |
Total | 346 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(mempool_resize);
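A hedged usage sketch (the two-per-device policy and names are illustrative): a driver can rescale its reserve as devices come and go; only the growing path allocates, so -ENOMEM is the one failure to handle.

#include <linux/kernel.h>
#include <linux/mempool.h>

static int my_rescale_pool(mempool_t *pool, int nr_devices)
{
	int err;

	/* Keep at least two reserved elements, plus two per attached device. */
	err = mempool_resize(pool, max(2 * nr_devices, 2));
	if (err)
		pr_warn("reserve resize to %d elements failed: %d\n",
			2 * nr_devices, err);
	return err;
}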
/**
* mempool_alloc - allocate an element from a specific memory pool
* @pool: pointer to the memory pool which was allocated via
* mempool_create().
* @gfp_mask: the usual allocation bitmask.
*
* This function only sleeps if the alloc_fn() callback sleeps or
* returns NULL. Note that due to preallocation, this function
* *never* fails when called from process context. (It might
* fail if called from an IRQ context.)
* Note: using __GFP_ZERO is not supported.
*/
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
void *element;
unsigned long flags;
wait_queue_t wait;
gfp_t gfp_temp;
VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */
gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */
gfp_mask |= __GFP_NOWARN; /* failures are OK */
gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);
repeat_alloc:
element = pool->alloc(gfp_temp, pool->pool_data);
if (likely(element != NULL))
return element;
spin_lock_irqsave(&pool->lock, flags);
if (likely(pool->curr_nr)) {
element = remove_element(pool, gfp_temp);
spin_unlock_irqrestore(&pool->lock, flags);
/* paired with rmb in mempool_free(), read comment there */
smp_wmb();
/*
* Update the allocation stack trace as this is more useful
* for debugging.
*/
kmemleak_update_trace(element);
return element;
}
/*
* We use gfp mask w/o direct reclaim or IO for the first round. If
* alloc failed with that and @pool was empty, retry immediately.
*/
if (gfp_temp != gfp_mask) {
spin_unlock_irqrestore(&pool->lock, flags);
gfp_temp = gfp_mask;
goto repeat_alloc;
}
/* We must not sleep if !__GFP_DIRECT_RECLAIM */
if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
spin_unlock_irqrestore(&pool->lock, flags);
return NULL;
}
/* Let's wait for someone else to return an element to @pool */
init_wait(&wait);
prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_irqrestore(&pool->lock, flags);
/*
* FIXME: this should be io_schedule(). The timeout is there as a
* workaround for some DM problems in 2.6.18.
*/
io_schedule_timeout(5*HZ);
finish_wait(&pool->wait, &wait);
goto repeat_alloc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 125 | 50.00% | 2 | 11.11% |
Tejun Heo | 51 | 20.40% | 2 | 11.11% |
Nicholas Piggin | 22 | 8.80% | 2 | 11.11% |
Andrew Morton | 11 | 4.40% | 3 | 16.67% |
Benjamin LaHaise | 8 | 3.20% | 1 | 5.56% |
Sebastian Ott | 7 | 2.80% | 1 | 5.56% |
Catalin Marinas | 6 | 2.40% | 1 | 5.56% |
Pavel Mironchik | 6 | 2.40% | 1 | 5.56% |
Michal Hocko | 5 | 2.00% | 1 | 5.56% |
Mel Gorman | 5 | 2.00% | 1 | 5.56% |
Alexander Potapenko | 2 | 0.80% | 1 | 5.56% |
Al Viro | 2 | 0.80% | 2 | 11.11% |
Total | 250 | 100.00% | 18 | 100.00% |
EXPORT_SYMBOL(mempool_alloc);
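A hedged usage sketch (names illustrative): I/O paths typically pass GFP_NOIO, which keeps __GFP_DIRECT_RECLAIM set, so the call waits for a returned element instead of failing; only reclaim-less masks such as GFP_ATOMIC can observe NULL.

#include <linux/gfp.h>
#include <linux/mempool.h>

static void *my_get_element(mempool_t *pool)
{
	/*
	 * GFP_NOIO allows direct reclaim, so this sleeps for a freed
	 * element rather than returning NULL.
	 */
	return mempool_alloc(pool, GFP_NOIO);
}

static void *my_try_get_element(mempool_t *pool)
{
	/* No __GFP_DIRECT_RECLAIM: can return NULL once the reserve is empty. */
	return mempool_alloc(pool, GFP_ATOMIC);
}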
/**
* mempool_free - return an element to the pool.
* @element: pool element pointer.
* @pool: pointer to the memory pool which was allocated via
* mempool_create().
*
* This function only sleeps if the free_fn() callback sleeps.
*/
void mempool_free(void *element, mempool_t *pool)
{
unsigned long flags;
if (unlikely(element == NULL))
return;
/*
* Paired with the wmb in mempool_alloc(). The preceding read is
* for @element and the following @pool->curr_nr. This ensures
* that the visible value of @pool->curr_nr is from after the
* allocation of @element. This is necessary for fringe cases
* where @element was passed to this task without going through
* barriers.
*
* For example, assume @p is %NULL at the beginning and one task
* performs "p = mempool_alloc(...);" while another task is doing
* "while (!p) cpu_relax(); mempool_free(p, ...);". This function
* may end up using curr_nr value which is from before allocation
* of @p without the following rmb.
*/
smp_rmb();
/*
* For correctness, we need a test which is guaranteed to trigger
* if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
* without locking achieves that and refilling as soon as possible
* is desirable.
*
* Because curr_nr visible here is always a value after the
* allocation of @element, any task which decremented curr_nr below
* min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
* incremented to min_nr afterwards. If curr_nr gets incremented
* to min_nr after the allocation of @element, the elements
* allocated after that are subject to the same guarantee.
*
* Waiters happen iff curr_nr is 0 and the above guarantee also
* ensures that there will be frees which return elements to the
* pool waking up the waiters.
*/
if (unlikely(pool->curr_nr < pool->min_nr)) {
spin_lock_irqsave(&pool->lock, flags);
if (likely(pool->curr_nr < pool->min_nr)) {
add_element(pool, element);
spin_unlock_irqrestore(&pool->lock, flags);
wake_up(&pool->wait);
return;
}
spin_unlock_irqrestore(&pool->lock, flags);
}
pool->free(element, pool->pool_data);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 95 | 79.83% | 1 | 16.67% |
Rusty Russell | 10 | 8.40% | 1 | 16.67% |
Mikulas Patocka | 6 | 5.04% | 1 | 16.67% |
Andrew Morton | 5 | 4.20% | 2 | 33.33% |
Tejun Heo | 3 | 2.52% | 1 | 16.67% |
Total | 119 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(mempool_free);
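A hedged sketch of the usual completion-side pairing (names illustrative): the caller simply hands the element back, and mempool_free() either tops up the reserve or forwards to free_fn().

#include <linux/mempool.h>

static void my_put_element(mempool_t *pool, void *element)
{
	/* mempool_free() tolerates NULL, so error paths may call this unconditionally. */
	mempool_free(element, pool);
}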
/*
* A commonly used alloc and free fn.
*/
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
struct kmem_cache *mem = pool_data;
VM_BUG_ON(mem->ctor);
return kmem_cache_alloc(mem, gfp_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alan Cox | 25 | 71.43% | 1 | 25.00% |
David Rientjes | 7 | 20.00% | 1 | 25.00% |
Pekka J Enberg | 2 | 5.71% | 1 | 25.00% |
Al Viro | 1 | 2.86% | 1 | 25.00% |
Total | 35 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(mempool_alloc_slab);
void mempool_free_slab(void *element, void *pool_data)
{
struct kmem_cache *mem = pool_data;
kmem_cache_free(mem, element);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alan Cox | 25 | 92.59% | 1 | 50.00% |
Pekka J Enberg | 2 | 7.41% | 1 | 50.00% |
Total | 27 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(mempool_free_slab);
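A hedged init sketch for the slab-backed case (names and sizes illustrative); linux/mempool.h also provides a mempool_create_slab_pool() wrapper for this exact pairing. Note that mempool_alloc_slab() rejects caches with a constructor, per the VM_BUG_ON above.

#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *my_cache;
static mempool_t *my_slab_pool;

static int my_init(void)
{
	/* No constructor: mempool_alloc_slab() insists on ctor-less caches. */
	my_cache = kmem_cache_create("my_objs", 128, 0, 0, NULL);
	if (!my_cache)
		return -ENOMEM;

	/* pool_data is the backing cache itself. */
	my_slab_pool = mempool_create(4, mempool_alloc_slab,
				      mempool_free_slab, my_cache);
	if (!my_slab_pool) {
		kmem_cache_destroy(my_cache);
		return -ENOMEM;
	}
	return 0;
}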
/*
* A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
* specified by pool_data
*/
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
size_t size = (size_t)pool_data;
return kmalloc(size, gfp_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Dobson | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(mempool_kmalloc);
void mempool_kfree(void *element, void *pool_data)
{
kfree(element);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Dobson | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(mempool_kfree);
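A hedged sketch (the 512-byte size is illustrative): pool_data carries the element size cast to a pointer, so a fixed-size kmalloc reserve needs no dedicated cache; mempool_create_kmalloc_pool() in linux/mempool.h wraps the same pairing.

#include <linux/mempool.h>

#define MY_ELEM_SIZE 512	/* illustrative fixed element size */

static mempool_t *my_create_kmalloc_pool(void)
{
	/* Reserve 8 buffers of MY_ELEM_SIZE bytes each. */
	return mempool_create(8, mempool_kmalloc, mempool_kfree,
			      (void *)(unsigned long)MY_ELEM_SIZE);
}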
/*
* A simple mempool-backed page allocator that allocates pages
* of the order specified by pool_data.
*/
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
int order = (int)(long)pool_data;
return alloc_pages(gfp_mask, order);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Dobson | 32 | 100.00% | 1 | 100.00% |
Total | 32 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(mempool_alloc_pages);
void mempool_free_pages(void *element, void *pool_data)
{
int order = (int)(long)pool_data;
__free_pages(element, order);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Dobson | 31 | 100.00% | 1 | 100.00% |
Total | 31 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(mempool_free_pages);
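A hedged sketch (the order-1 choice is illustrative): pool_data carries the page order, and the elements are struct page pointers to 2^order contiguous pages; mempool_create_page_pool() wraps the same pairing.

#include <linux/gfp.h>
#include <linux/mempool.h>

static mempool_t *my_create_page_pool(void)
{
	/* Reserve 4 order-1 blocks (two pages each); elements are struct page *. */
	return mempool_create(4, mempool_alloc_pages, mempool_free_pages,
			      (void *)(long)1);
}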
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Rientjes | 562 | 28.10% | 3 | 6.52% |
Linus Torvalds | 364 | 18.20% | 4 | 8.70% |
Alasdair G. Kergon | 302 | 15.10% | 1 | 2.17% |
Andrew Morton | 187 | 9.35% | 6 | 13.04% |
Matthew Dobson | 131 | 6.55% | 2 | 4.35% |
Andrey Ryabinin | 124 | 6.20% | 2 | 4.35% |
Tejun Heo | 71 | 3.55% | 4 | 8.70% |
Alan Cox | 56 | 2.80% | 1 | 2.17% |
Christoph Lameter | 48 | 2.40% | 1 | 2.17% |
Prasanna Meda | 38 | 1.90% | 1 | 2.17% |
Nicholas Piggin | 22 | 1.10% | 2 | 4.35% |
Alexander Potapenko | 14 | 0.70% | 1 | 2.17% |
Rusty Russell | 10 | 0.50% | 1 | 2.17% |
Sergey Senozhatsky | 9 | 0.45% | 1 | 2.17% |
Catalin Marinas | 9 | 0.45% | 1 | 2.17% |
Benjamin LaHaise | 8 | 0.40% | 1 | 2.17% |
Sebastian Ott | 7 | 0.35% | 1 | 2.17% |
Michal Hocko | 6 | 0.30% | 1 | 2.17% |
Mikulas Patocka | 6 | 0.30% | 1 | 2.17% |
Pavel Mironchik | 6 | 0.30% | 1 | 2.17% |
Mel Gorman | 5 | 0.25% | 1 | 2.17% |
Pekka J Enberg | 4 | 0.20% | 1 | 2.17% |
Al Viro | 3 | 0.15% | 2 | 4.35% |
Christoph Hellwig | 2 | 0.10% | 1 | 2.17% |
Matthew Dawson | 2 | 0.10% | 1 | 2.17% |
Simon Arlott | 1 | 0.05% | 1 | 2.17% |
Joe Perches | 1 | 0.05% | 1 | 2.17% |
Robert P. J. Day | 1 | 0.05% | 1 | 2.17% |
Paul Gortmaker | 1 | 0.05% | 1 | 2.17% |
Total | 2000 | 100.00% | 46 | 100.00% |