Release 4.12 drivers/md/dm-bio-prison-v2.c
  
  
  
/*
 * Copyright (C) 2012-2017 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm.h"
#include "dm-bio-prison-v2.h"
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
/*----------------------------------------------------------------*/
#define MIN_CELLS 1024
struct dm_bio_prison_v2 {
	struct workqueue_struct *wq;

	spinlock_t lock;
	mempool_t *cell_pool;
	struct rb_root cells;
};
static struct kmem_cache *_cell_cache;
/*----------------------------------------------------------------*/
/*
 * MIN_CELLS is the number of cells the pool guarantees can be in use
 * _concurrently_.  Don't confuse it with the number of distinct keys.
 */
struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
{
	struct dm_bio_prison_v2 *prison = kmalloc(sizeof(*prison), GFP_KERNEL);
	if (!prison)
		return NULL;
	prison->wq = wq;
	spin_lock_init(&prison->lock);
	prison->cell_pool = mempool_create_slab_pool(MIN_CELLS, _cell_cache);
	if (!prison->cell_pool) {
		kfree(prison);
		return NULL;
	}
	prison->cells = RB_ROOT;
	return prison;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mike Snitzer | 58 | 66.67% | 1 | 33.33% | 
| Joe Thornber | 29 | 33.33% | 2 | 66.67% | 
| Total | 87 | 100.00% | 3 | 100.00% | 
EXPORT_SYMBOL_GPL(dm_bio_prison_create_v2);
void dm_bio_prison_destroy_v2(struct dm_bio_prison_v2 *prison)
{
	mempool_destroy(prison->cell_pool);
	kfree(prison);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mike Snitzer | 20 | 90.91% | 1 | 50.00% | 
| Joe Thornber | 2 | 9.09% | 1 | 50.00% | 
| Total | 22 | 100.00% | 2 | 100.00% | 
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy_v2);
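The prison only borrows the caller's workqueue for queued quiesce continuations; it never creates or destroys the workqueue itself. A minimal caller sketch, assuming a hypothetical example_client structure and workqueue name (neither is part of this API):

/*
 * Hypothetical client state; a real target (e.g. dm-cache) keeps the
 * equivalent pointers in its own per-target structure.
 */
struct example_client {
	struct workqueue_struct *wq;
	struct dm_bio_prison_v2 *prison;
};

static int example_client_init(struct example_client *c)
{
	c->wq = alloc_ordered_workqueue("dm-example", WQ_MEM_RECLAIM);
	if (!c->wq)
		return -ENOMEM;

	c->prison = dm_bio_prison_create_v2(c->wq);
	if (!c->prison) {
		destroy_workqueue(c->wq);
		return -ENOMEM;
	}

	return 0;
}

static void example_client_exit(struct example_client *c)
{
	/* destroy the prison first; it holds a pointer into our workqueue */
	dm_bio_prison_destroy_v2(c->prison);
	destroy_workqueue(c->wq);
}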
struct dm_bio_prison_cell_v2 *dm_bio_prison_alloc_cell_v2(struct dm_bio_prison_v2 *prison, gfp_t gfp)
{
	return mempool_alloc(prison->cell_pool, gfp);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Joe Thornber | 25 | 100.00% | 2 | 100.00% | 
| Total | 25 | 100.00% | 2 | 100.00% | 
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell_v2);
void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
				struct dm_bio_prison_cell_v2 *cell)
{
	mempool_free(cell, prison->cell_pool);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Joe Thornber | 24 | 100.00% | 2 | 100.00% | 
| Total | 24 | 100.00% | 2 | 100.00% | 
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell_v2);
static void __setup_new_cell(struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell)
{
	memset(cell, 0, sizeof(*cell));
	memcpy(&cell->key, key, sizeof(cell->key));
	bio_list_init(&cell->bios);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Joe Thornber | 30 | 55.56% | 3 | 75.00% | 
| Mike Snitzer | 24 | 44.44% | 1 | 25.00% | 
| Total | 54 | 100.00% | 4 | 100.00% | 
static int cmp_keys(struct dm_cell_key_v2 *lhs,
		    struct dm_cell_key_v2 *rhs)
{
	if (lhs->virtual < rhs->virtual)
		return -1;
	if (lhs->virtual > rhs->virtual)
		return 1;
	if (lhs->dev < rhs->dev)
		return -1;
	if (lhs->dev > rhs->dev)
		return 1;
	if (lhs->block_end <= rhs->block_begin)
		return -1;
	if (lhs->block_begin >= rhs->block_end)
		return 1;
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Joe Thornber | 98 | 98.00% | 4 | 80.00% | 
| Mike Snitzer | 2 | 2.00% | 1 | 20.00% | 
| Total | 100 | 100.00% | 5 | 100.00% | 
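cmp_keys() orders keys by virtual space, then device, then block range, and treats two keys on the same device whose [block_begin, block_end) ranges overlap as equal, so a single cell can cover a whole range of blocks. An illustrative sketch only; the field names are taken from dm_cell_key_v2 in dm-bio-prison-v2.h:

/* illustrative, never called: demonstrates the overlap semantics above */
static void example_cmp_keys(void)
{
	struct dm_cell_key_v2 a = {
		.virtual = 0, .dev = 0, .block_begin = 0, .block_end = 64,
	};
	struct dm_cell_key_v2 b = {
		.virtual = 0, .dev = 0, .block_begin = 32, .block_end = 96,
	};

	BUG_ON(cmp_keys(&a, &b) != 0);	/* overlapping ranges compare equal */

	b.block_begin = 64;
	b.block_end = 128;
	BUG_ON(cmp_keys(&a, &b) >= 0);	/* disjoint ranges: a sorts before b */
}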
/*
 * Returns true if a cell for the key was already present; otherwise it
 * inserts the preallocated cell and returns false.
 */
static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
			     struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell_prealloc,
			     struct dm_bio_prison_cell_v2 **result)
{
	int r;
	struct rb_node **new = &prison->cells.rb_node, *parent = NULL;
	while (*new) {
		struct dm_bio_prison_cell_v2 *cell =
			container_of(*new, struct dm_bio_prison_cell_v2, node);
		r = cmp_keys(key, &cell->key);
		parent = *new;
		if (r < 0)
			new = &((*new)->rb_left);
		else if (r > 0)
			new = &((*new)->rb_right);
		else {
			*result = cell;
			return true;
		}
	}
	__setup_new_cell(key, cell_prealloc);
	*result = cell_prealloc;
	rb_link_node(&cell_prealloc->node, parent, new);
	rb_insert_color(&cell_prealloc->node, &prison->cells);
	return false;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Joe Thornber | 156 | 88.64% | 3 | 75.00% | 
| Mike Snitzer | 20 | 11.36% | 1 | 25.00% | 
| Total | 176 | 100.00% | 4 | 100.00% | 
static bool __get(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned lock_level,
		  struct bio *inmate,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell)
{
	if (__find_or_insert(prison, key, cell_prealloc, cell)) {
		if ((*cell)->exclusive_lock) {
			if (lock_level <= (*cell)->exclusive_level) {
				bio_list_add(&(*cell)->bios, inmate);
				return false;
			}
		}
		(*cell)->shared_count++;
	} else
		(*cell)->shared_count = 1;
	return true;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Joe Thornber | 102 | 91.89% | 3 | 75.00% | 
| Mike Snitzer | 9 | 8.11% | 1 | 25.00% | 
| Total | 111 | 100.00% | 4 | 100.00% | 
bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned lock_level,
		    struct bio *inmate,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	bool r;
	unsigned long flags;
	spin_lock_irqsave(&prison->lock, flags);
	r = __get(prison, key, lock_level, inmate, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);
	return r;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Joe Thornber | 80 | 98.77% | 3 | 75.00% | 
| Mike Snitzer | 1 | 1.23% | 1 | 25.00% | 
| Total | 81 | 100.00% | 4 | 100.00% | 
EXPORT_SYMBOL_GPL(dm_cell_get_v2);
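A true return means the bio now holds a shared lock on the cell; false means it was parked on the cell's bio list behind an exclusive holder and must not be issued. The caller always passes in a preallocated cell and frees it again if the prison did not consume it. A hedged caller sketch; example_get_shared(), the zeroed virtual/dev fields and lock level 0 are illustrative assumptions, not part of this API:

static bool example_get_shared(struct dm_bio_prison_v2 *prison, struct bio *bio,
			       dm_block_t begin, dm_block_t end,
			       struct dm_bio_prison_cell_v2 **cell_out)
{
	struct dm_cell_key_v2 key = {
		.virtual = 0, .dev = 0, .block_begin = begin, .block_end = end,
	};
	struct dm_bio_prison_cell_v2 *prealloc, *cell;

	/* GFP_NOIO may sleep, but the mempool guarantees it won't fail */
	prealloc = dm_bio_prison_alloc_cell_v2(prison, GFP_NOIO);

	if (!dm_cell_get_v2(prison, &key, 0, bio, prealloc, &cell)) {
		/* bio is parked in the cell; it will be re-issued on unlock */
		dm_bio_prison_free_cell_v2(prison, prealloc);
		return false;
	}

	/* the prison only consumed the prealloc if no cell existed yet */
	if (cell != prealloc)
		dm_bio_prison_free_cell_v2(prison, prealloc);

	*cell_out = cell;
	return true;
}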
static bool __put(struct dm_bio_prison_v2 *prison,
		  struct dm_bio_prison_cell_v2 *cell)
{
	BUG_ON(!cell->shared_count);
	cell->shared_count--;
	// FIXME: shared locks granted above the lock level could starve this
	if (!cell->shared_count) {
		if (cell->exclusive_lock) {
			if (cell->quiesce_continuation) {
				queue_work(prison->wq, cell->quiesce_continuation);
				cell->quiesce_continuation = NULL;
			}
		} else {
			rb_erase(&cell->node, &prison->cells);
			return true;
		}
	}
	return false;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Joe Thornber | 64 | 68.09% | 3 | 75.00% | 
| Mike Snitzer | 30 | 31.91% | 1 | 25.00% | 
| Total | 94 | 100.00% | 4 | 100.00% | 
bool dm_cell_put_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_bio_prison_cell_v2 *cell)
{
	bool r;
	unsigned long flags;
	spin_lock_irqsave(&prison->lock, flags);
	r = __put(prison, cell);
	spin_unlock_irqrestore(&prison->lock, flags);
	return r;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mike Snitzer | 33 | 61.11% | 1 | 25.00% | 
| Joe Thornber | 21 | 38.89% | 3 | 75.00% | 
| Total | 54 | 100.00% | 4 | 100.00% | 
EXPORT_SYMBOL_GPL(dm_cell_put_v2);
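A true return means the last shared holder is gone and the cell has already been unlinked from the rbtree, so ownership passes back to the caller, which must free it. A minimal counterpart to the sketch above:

static void example_put_shared(struct dm_bio_prison_v2 *prison,
			       struct dm_bio_prison_cell_v2 *cell)
{
	if (dm_cell_put_v2(prison, cell))
		dm_bio_prison_free_cell_v2(prison, cell);
}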
static int __lock(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned lock_level,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell_result)
{
	struct dm_bio_prison_cell_v2 *cell;
	if (__find_or_insert(prison, key, cell_prealloc, &cell)) {
		if (cell->exclusive_lock)
			return -EBUSY;
		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;
		// FIXME: we don't yet know what level these shared locks
		// were taken at, so have to quiesce them all.
		return cell->shared_count > 0;
	} else {
		cell = cell_prealloc;
		cell->shared_count = 0;
		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;
	}
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Joe Thornber | 102 | 85.00% | 2 | 66.67% | 
| Mike Snitzer | 18 | 15.00% | 1 | 33.33% | 
| Total | 120 | 100.00% | 3 | 100.00% | 
int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned lock_level,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	int r;
	unsigned long flags;
	spin_lock_irqsave(&prison->lock, flags);
	r = __lock(prison, key, lock_level, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);
	return r;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Joe Thornber | 39 | 52.70% | 3 | 75.00% | 
| Mike Snitzer | 35 | 47.30% | 1 | 25.00% | 
| Total | 74 | 100.00% | 4 | 100.00% | 
EXPORT_SYMBOL_GPL(dm_cell_lock_v2);
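dm_cell_lock_v2() returns -EBUSY if the cell is already exclusively locked, 1 if the lock was granted but shared holders remain (the caller must quiesce before touching the data), and 0 if it was granted with no holders at all. A hedged sketch of the first half of a migration-style flow; struct example_migration, example_complete_migration() and the lock level 1 are illustrative assumptions:

struct example_migration {
	struct dm_bio_prison_v2 *prison;
	struct dm_bio_prison_cell_v2 *cell;
	struct work_struct work;		/* quiesce continuation */
};

static void example_complete_migration(struct work_struct *ws);

static int example_begin_migration(struct example_migration *m,
				   struct dm_cell_key_v2 *key,
				   struct dm_bio_prison_cell_v2 *prealloc)
{
	int r = dm_cell_lock_v2(m->prison, key, 1, prealloc, &m->cell);
	if (r < 0)
		return r;	/* -EBUSY: another exclusive holder */

	if (m->cell != prealloc)
		dm_bio_prison_free_cell_v2(m->prison, prealloc);

	/*
	 * dm_cell_quiesce_v2() queues the continuation immediately when
	 * there are no shared holders, so it is safe to call it whether
	 * r was 0 or 1.
	 */
	INIT_WORK(&m->work, example_complete_migration);
	dm_cell_quiesce_v2(m->prison, m->cell, &m->work);

	return 0;
}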
static void __quiesce(struct dm_bio_prison_v2 *prison,
		      struct dm_bio_prison_cell_v2 *cell,
		      struct work_struct *continuation)
{
	if (!cell->shared_count)
		queue_work(prison->wq, continuation);
	else
		cell->quiesce_continuation = continuation;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Joe Thornber | 27 | 61.36% | 2 | 40.00% | 
| Mike Snitzer | 15 | 34.09% | 2 | 40.00% | 
| Christoph Hellwig | 2 | 4.55% | 1 | 20.00% | 
| Total | 44 | 100.00% | 5 | 100.00% | 
void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
			struct dm_bio_prison_cell_v2 *cell,
			struct work_struct *continuation)
{
	unsigned long flags;
	spin_lock_irqsave(&prison->lock, flags);
	__quiesce(prison, cell, continuation);
	spin_unlock_irqrestore(&prison->lock, flags);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Joe Thornber | 53 | 100.00% | 2 | 100.00% | 
| Total | 53 | 100.00% | 2 | 100.00% | 
EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);
static int __promote(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     unsigned new_lock_level)
{
	if (!cell->exclusive_lock)
		return -EINVAL;
	cell->exclusive_level = new_lock_level;
	return cell->shared_count > 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Joe Thornber | 43 | 100.00% | 2 | 100.00% | 
| Total | 43 | 100.00% | 2 | 100.00% | 
int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
			    struct dm_bio_prison_cell_v2 *cell,
			    unsigned new_lock_level)
{
	int r;
	unsigned long flags;
	spin_lock_irqsave(&prison->lock, flags);
	r = __promote(prison, cell, new_lock_level);
	spin_unlock_irqrestore(&prison->lock, flags);
	return r;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Joe Thornber | 59 | 100.00% | 2 | 100.00% | 
| Total | 59 | 100.00% | 2 | 100.00% | 
EXPORT_SYMBOL_GPL(dm_cell_lock_promote_v2);
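Promotion raises the exclusive level of a lock that is already held, e.g. from a level that only blocks writes to one that also blocks reads; the return convention mirrors dm_cell_lock_v2(), so a positive return means shared locks granted above the old level must drain again. A hedged continuation of the migration sketch above:

static void example_promote(struct example_migration *m, unsigned new_level,
			    struct work_struct *continuation)
{
	int r = dm_cell_lock_promote_v2(m->prison, m->cell, new_level);
	if (r < 0)
		return;		/* cell was not exclusively locked */

	/* runs the continuation now, or once the remaining holders drop out */
	dm_cell_quiesce_v2(m->prison, m->cell, continuation);
}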
static bool __unlock(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     struct bio_list *bios)
{
	BUG_ON(!cell->exclusive_lock);
	bio_list_merge(bios, &cell->bios);
	bio_list_init(&cell->bios);
	if (cell->shared_count) {
		cell->exclusive_lock = 0;
		return false;
	}
	rb_erase(&cell->node, &prison->cells);
	return true;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Joe Thornber | 41 | 51.25% | 1 | 50.00% | 
| Mike Snitzer | 39 | 48.75% | 1 | 50.00% | 
| Total | 80 | 100.00% | 2 | 100.00% | 
bool dm_cell_unlock_v2(struct dm_bio_prison_v2 *prison,
		       struct dm_bio_prison_cell_v2 *cell,
		       struct bio_list *bios)
{
	bool r;
	unsigned long flags;
	spin_lock_irqsave(&prison->lock, flags);
	r = __unlock(prison, cell, bios);
	spin_unlock_irqrestore(&prison->lock, flags);
	return r;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mike Snitzer | 42 | 68.85% | 1 | 50.00% | 
| Joe Thornber | 19 | 31.15% | 1 | 50.00% | 
| Total | 61 | 100.00% | 2 | 100.00% | 
EXPORT_SYMBOL_GPL(dm_cell_unlock_v2);
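dm_cell_unlock_v2() hands back the bios that were parked on the cell while the exclusive lock was held, and returns true only if the cell was also removed from the prison (no shared holders remained), in which case the caller owns and must free it. A hedged second half of the migration sketch; re-issuing via generic_make_request() is purely illustrative, a real target would push these bios through its own deferred-bio path:

static void example_complete_migration(struct work_struct *ws)
{
	struct example_migration *m =
		container_of(ws, struct example_migration, work);
	struct bio_list bios;
	struct bio *bio;

	/* ... data has been copied/moved while the range was locked ... */

	bio_list_init(&bios);
	if (dm_cell_unlock_v2(m->prison, m->cell, &bios))
		dm_bio_prison_free_cell_v2(m->prison, m->cell);

	/* re-issue the bios that were parked behind the exclusive lock */
	while ((bio = bio_list_pop(&bios)))
		generic_make_request(bio);
}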
/*----------------------------------------------------------------*/
int __init dm_bio_prison_init_v2(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell_v2, 0);
	if (!_cell_cache)
		return -ENOMEM;
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mike Snitzer | 27 | 93.10% | 1 | 50.00% | 
| Joe Thornber | 2 | 6.90% | 1 | 50.00% | 
| Total | 29 | 100.00% | 2 | 100.00% | 
void dm_bio_prison_exit_v2(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mike Snitzer | 15 | 93.75% | 1 | 50.00% | 
| Joe Thornber | 1 | 6.25% | 1 | 50.00% | 
| Total | 16 | 100.00% | 2 | 100.00% | 
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Joe Thornber | 1066 | 70.36% | 7 | 70.00% | 
| Mike Snitzer | 447 | 29.50% | 2 | 20.00% | 
| Christoph Hellwig | 2 | 0.13% | 1 | 10.00% | 
| Total | 1515 | 100.00% | 10 | 100.00% | 