Release 4.11 drivers/md/dm-table.c
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>


#define DM_MSG_PREFIX "table"


#define MAX_DEPTH 16

#define NODE_SIZE L1_CACHE_BYTES

#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))

#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)


struct dm_table {
	struct mapped_device *md;
	unsigned type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	bool all_blk_mq:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

	struct list_head target_callbacks;
};

/*
 * Similar to ceiling(log_size(n))
 */

static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     40      93.02%   1        33.33%
Alasdair G. Kergon           2       4.65%    1        33.33%
Joe Thornber                 1       2.33%    1        33.33%
Total                        43      100.00%  3        100.00%
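int_log() computes ceil(log_base(n)) by dividing n with round-up until it collapses to 1. A minimal userspace sketch of the same arithmetic (div_up() below stands in for the kernel's dm_div_up(); the demo values are illustrative):

#include <stdio.h>

/* Stand-in for the kernel's dm_div_up(): divide, rounding up. */
static unsigned int div_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

/* Same loop as int_log() above. */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	unsigned int result = 0;

	while (n > 1) {
		n = div_up(n, base);
		result++;
	}
	return result;
}

int main(void)
{
	/* ceil(log_8(512)) = 3, ceil(log_8(513)) = 4, ceil(log_8(1)) = 0 */
	printf("%u %u %u\n", int_log(512, 8), int_log(513, 8), int_log(1, 8));
	return 0;
}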

/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     22      88.00%   1        50.00%
Joe Thornber                 3       12.00%   1        50.00%
Total                        25      100.00%  2        100.00%

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     33      94.29%   1        50.00%
Joe Thornber                 2       5.71%    1        50.00%
Total                        35      100.00%  2        100.00%
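get_child() and get_node() encode the btree in flat arrays: node n's children occupy indices n*CHILDREN_PER_NODE through n*CHILDREN_PER_NODE + KEYS_PER_NODE on the next level. A userspace sketch of that index arithmetic, assuming a 64-byte L1 cache line and an 8-byte sector_t (so 8 keys and 9 children per node):

#include <stdio.h>

#define NODE_SIZE 64				/* assumed L1_CACHE_BYTES */
#define KEYS_PER_NODE (NODE_SIZE / 8)		/* 8 keys, assuming 8-byte sector_t */
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)	/* 9 children per node */

static unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

int main(void)
{
	/* Children of node 0 are nodes 0..8, of node 1 are nodes 9..17. */
	printf("first child of node 1: %u\n", get_child(1, 0));		/* 9 */
	printf("last child of node 1:  %u\n", get_child(1, KEYS_PER_NODE));	/* 17 */
	return 0;
}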

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) - 1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     75      97.40%   1        50.00%
Joe Thornber                 2       2.60%    1        50.00%
Total                        77      100.00%  2        100.00%

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     89      95.70%   1        50.00%
Joe Thornber                 4       4.30%    1        50.00%
Total                        93      100.00%  2        100.00%

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vzalloc(size);

	return addr;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     51      98.08%   1        50.00%
Joe Perches                  1       1.92%    1        50.00%
Total                        52      100.00%  2        100.00%
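The nmemb > ULONG_MAX / elem_size test is the usual pre-multiplication overflow guard: it rejects any product that would wrap before the allocation size is computed. A userspace sketch of the same check:

#include <limits.h>
#include <stdio.h>

/* Returns 1 if nmemb * elem_size would overflow an unsigned long. */
static int mul_would_overflow(unsigned long nmemb, unsigned long elem_size)
{
	return nmemb > (ULONG_MAX / elem_size);
}

int main(void)
{
	printf("%d\n", mul_would_overflow(1024, 4096));		/* 0: fits */
	printf("%d\n", mul_would_overflow(ULONG_MAX / 2, 4));	/* 1: would wrap */
	return 0;
}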

EXPORT_SYMBOL(dm_vcalloc);

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	memset(n_highs, -1, sizeof(*n_highs) * num);
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     109     94.78%   2        40.00%
Jun'ichi Nomura              3       2.61%    1        20.00%
Andrew Morton                2       1.74%    1        20.00%
Joe Thornber                 1       0.87%    1        20.00%
Total                        115     100.00%  5        100.00%


int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	INIT_LIST_HEAD(&t->target_callbacks);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (!num_targets) {
		kfree(t);
		return -ENOMEM;
	}

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		return -ENOMEM;
	}

	t->type = DM_TYPE_NONE;
	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     77      53.85%   1        11.11%
Andrew Morton                23      16.08%   2        22.22%
Mikulas Patocka              16      11.19%   1        11.11%
Mike Anderson                11      7.69%    1        11.11%
Neil Brown                   8       5.59%    1        11.11%
Mike Snitzer                 6       4.20%    1        11.11%
Al Viro                      1       0.70%    1        11.11%
Dmitriy Monakhov             1       0.70%    1        11.11%
Total                        143     100.00%  9        100.00%


static void free_devices(struct list_head *devices, struct mapped_device *md)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
		       dm_device_name(md), dd->dm_dev->name);
		dm_put_table_device(md, dd->dm_dev);
		kfree(dd);
	}
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     39      49.37%   1        20.00%
Benjamin Marzinski           21      26.58%   1        20.00%
Jonathan E Brassow           9       11.39%   1        20.00%
Paul Jimenez                 8       10.13%   1        20.00%
Mikulas Patocka              2       2.53%    1        20.00%
Total                        79      100.00%  5        100.00%


void dm_table_destroy(struct dm_table *t)
{
	unsigned int i;

	if (!t)
		return;

	/* free the indexes */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	free_devices(&t->devices, t->md);

	dm_free_md_mempools(t->mempools);

	kfree(t);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     100     78.12%   1        12.50%
Joe Thornber                 9       7.03%    2        25.00%
Kiyoshi Ueda                 7       5.47%    1        12.50%
Alasdair G. Kergon           6       4.69%    1        12.50%
Benjamin Marzinski           4       3.12%    1        12.50%
Will Drewry                  1       0.78%    1        12.50%
Mikulas Patocka              1       0.78%    1        12.50%
Total                        128     100.00%  8        100.00%

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry (dd, l, list)
		if (dd->dm_dev->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     38      80.85%   1        25.00%
Andrew Morton                5       10.64%   1        25.00%
Mikulas Patocka              3       6.38%    1        25.00%
Benjamin Marzinski           1       2.13%    1        25.00%
Total                        47      100.00%  4        100.00%

/*
 * If possible, this checks an area of a destination device is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct request_queue *q;
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	sector_t dev_size =
		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
	unsigned short logical_block_size_sectors =
		limits->logical_block_size >> SECTOR_SHIFT;
	char b[BDEVNAME_SIZE];

	/*
	 * Some devices exist without request functions,
	 * such as loop devices not yet bound to backing files.
	 * Forbid the use of such devices.
	 */
	q = bdev_get_queue(bdev);
	if (!q || !q->make_request_fn) {
		DMWARN("%s: %s is not yet initialised: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	if (!dev_size)
		return 0;

	if ((start >= dev_size) || (start + len > dev_size)) {
		DMWARN("%s: %s too small for target: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	if (logical_block_size_sectors <= 1)
		return 0;

	if (start & (logical_block_size_sectors - 1)) {
		DMWARN("%s: start=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)start,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	if (len & (logical_block_size_sectors - 1)) {
		DMWARN("%s: len=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)len,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	return 0;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 199     61.99%   4        40.00%
Milan Broz                   71      22.12%   1        10.00%
Alan Cox                     30      9.35%    1        10.00%
Mikulas Patocka              10      3.12%    2        20.00%
Mike Anderson                9       2.80%    1        10.00%
Joe Thornber                 2       0.62%    1        10.00%
Total                        321     100.00%  10       100.00%
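The start & (logical_block_size_sectors - 1) tests work because logical block sizes are powers of two, so subtracting one yields a bit mask. A small userspace illustration, assuming 4096-byte logical blocks (8 sectors, mask 7):

#include <stdio.h>

int main(void)
{
	/* A 4096-byte logical block is 8 x 512-byte sectors; mask = 7. */
	unsigned short lbs_sectors = 4096 >> 9;
	unsigned long long start;

	for (start = 0; start <= 16; start += 4)
		printf("start=%llu is %s\n", start,
		       (start & (lbs_sectors - 1)) ? "misaligned" : "aligned");
	return 0;
}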

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev *old_dev, *new_dev;

	old_dev = dd->dm_dev;

	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
				dd->dm_dev->mode | new_mode, &new_dev);
	if (r)
		return r;

	dd->dm_dev = new_dev;
	dm_put_table_device(md, old_dev);

	return 0;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     34      40.00%   1        14.29%
Benjamin Marzinski           25      29.41%   1        14.29%
Alasdair G. Kergon           15      17.65%   1        14.29%
Jun'ichi Nomura              7       8.24%    1        14.29%
Mikulas Patocka              2       2.35%    1        14.29%
Kevin Corry                  1       1.18%    1        14.29%
Al Viro                      1       1.18%    1        14.29%
Total                        85      100.00%  7        100.00%

/*
 * Convert the path to a device
 */
dev_t dm_get_dev_t(const char *path)
{
	dev_t uninitialized_var(dev);
	struct block_device *bdev;

	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		dev = name_to_dev_t(path);
	else {
		dev = bdev->bd_dev;
		bdput(bdev);
	}

	return dev;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
DingXiang                    59      100.00%  1        100.00%
Total                        59      100.00%  1        100.00%

EXPORT_SYMBOL_GPL(dm_get_dev_t);

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev_internal *dd;
	struct dm_table *t = ti->table;

	BUG_ON(!t);

	dev = dm_get_dev_t(path);
	if (!dev)
		return -ENODEV;

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
			kfree(dd);
			return r;
		}

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	*result = dd->dm_dev;
	return 0;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     174     78.73%   1        10.00%
Benjamin Marzinski           11      4.98%    1        10.00%
Mike Snitzer                 10      4.52%    1        10.00%
Al Viro                      8       3.62%    2        20.00%
Jun'ichi Nomura              6       2.71%    1        10.00%
Mikulas Patocka              5       2.26%    1        10.00%
Eric Sesterhenn / Snakebyte  3       1.36%    1        10.00%
Dan Ehrenberg                3       1.36%    1        10.00%
DingXiang                    1       0.45%    1        10.00%
Total                        221     100.00%  10       100.00%

EXPORT_SYMBOL(dm_get_device);
static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	char b[BDEVNAME_SIZE];

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %s",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return 0;
	}

	if (bdev_stack_limits(limits, bdev, start) < 0)
		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
		       "physical_block_size=%u, logical_block_size=%u, "
		       "alignment_offset=%u, start=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       q->limits.physical_block_size,
		       q->limits.logical_block_size,
		       q->limits.alignment_offset,
		       (unsigned long long) start << SECTOR_SHIFT);

	return 0;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 88      55.70%   5        45.45%
Alasdair G. Kergon           37      23.42%   1        9.09%
Alan Cox                     24      15.19%   1        9.09%
Martin K. Petersen           6       3.80%    2        18.18%
Jens Axboe                   2       1.27%    1        9.09%
Bryn M. Reeves               1       0.63%    1        9.09%
Total                        158     100.00%  11       100.00%

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	int found = 0;
	struct list_head *devices = &ti->table->devices;
	struct dm_dev_internal *dd;

	list_for_each_entry(dd, devices, list) {
		if (dd->dm_dev == d) {
			found = 1;
			break;
		}
	}
	if (!found) {
		DMWARN("%s: device %s not in table devices list",
		       dm_device_name(ti->table->md), d->name);
		return;
	}
	if (atomic_dec_and_test(&dd->count)) {
		dm_put_table_device(ti->table->md, d);
		list_del(&dd->list);
		kfree(dd);
	}
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Benjamin Marzinski           67      54.03%   1        25.00%
Alan Cox                     42      33.87%   1        25.00%
Mikulas Patocka              10      8.06%    1        25.00%
Jun'ichi Nomura              5       4.03%    1        25.00%
Total                        124     100.00%  4        100.00%

EXPORT_SYMBOL(dm_put_device);

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     65      100.00%  1        100.00%
Total                        65      100.00%  1        100.00%

/*
 * Used to dynamically allocate the arg array.
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;
	gfp_t gfp;

	if (*array_size) {
		new_size = *array_size * 2;
		gfp = GFP_KERNEL;
	} else {
		new_size = 8;
		gfp = GFP_NOIO;
	}
	argv = kmalloc(new_size * sizeof(*argv), gfp);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Kevin Corry                  81      75.70%   1        50.00%
Mikulas Patocka              26      24.30%   1        50.00%
Total                        107     100.00%  2        100.00%

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
		start = skip_spaces(end);

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     163     68.49%   1        20.00%
Kevin Corry                  56      23.53%   1        20.00%
David Teigland               15      6.30%    1        20.00%
André Goddard Rosa           3       1.26%    1        20.00%
Alasdair G. Kergon           1       0.42%    1        20.00%
Total                        238     100.00%  5        100.00%
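The backslash handling lets a single ctr argument contain whitespace. The userspace port below (split_args() is an illustrative re-implementation, not the kernel function; it drops realloc_argv() in favour of a fixed-size argv) shows the quoting behaviour:

#include <ctype.h>
#include <stdio.h>

/* Splits on whitespace; '\' escapes the next character; modifies input. */
static int split_args(char *input, char **argv, int max)
{
	int argc = 0;
	char *end = input, *start, *out;

	while (argc < max) {
		while (isspace((unsigned char)*end))
			end++;
		if (!*end)
			break;
		start = out = end;
		while (*end) {
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}
			if (isspace((unsigned char)*end))
				break;
			*out++ = *end++;
		}
		if (*end)
			end++;
		*out = '\0';
		argv[argc++] = start;
	}
	return argc;
}

int main(void)
{
	char line[] = "0 409600 linear /dev/sda one\\ arg";
	char *argv[8];
	int i, argc = split_args(line, argv, 8);

	for (i = 0; i < argc; i++)
		printf("argv[%d] = '%s'\n", i, argv[i]);	/* last: 'one arg' */
	return 0;
}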

/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
						     struct queue_limits *limits)
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
	 */
	unsigned short device_logical_block_size_sects =
		limits->logical_block_size >> SECTOR_SHIFT;

	/*
	 * Offset of the start of the next table entry, mod logical_block_size.
	 */
	unsigned short next_target_start = 0;

	/*
	 * Given an aligned bio that extends beyond the end of a
	 * target, how many sectors must the next target handle?
	 */
	unsigned short remaining = 0;

	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i = 0;

	/*
	 * Check each entry in the table in turn.
	 */
	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		blk_set_stacking_limits(&ti_limits);

		/* combine all target devices' limits */
		if (ti->type->iterate_devices)
			ti->type->iterate_devices(ti, dm_set_device_limits,
						  &ti_limits);

		/*
		 * If the remaining sectors fall entirely within this
		 * table entry are they compatible with its logical_block_size?
		 */
		if (remaining < ti->len &&
		    remaining & ((ti_limits.logical_block_size >>
				  SECTOR_SHIFT) - 1))
			break;	/* Error */

		next_target_start =
		    (unsigned short) ((next_target_start + ti->len) &
				      (device_logical_block_size_sects - 1));
		remaining = next_target_start ?
		    device_logical_block_size_sects - next_target_start : 0;
	}

	if (remaining) {
		DMWARN("%s: table line %u (start sect %llu len %llu) "
		       "not aligned to h/w logical block size %u",
		       dm_device_name(table->md), i,
		       (unsigned long long) ti->begin,
		       (unsigned long long) ti->len,
		       limits->logical_block_size);
		return -EINVAL;
	}

	return 0;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 199     92.13%   4        50.00%
Andrew Morton                13      6.02%    2        25.00%
Neil Brown                   3       1.39%    1        12.50%
Martin K. Petersen           1       0.46%    1        12.50%
Total                        216     100.00%  8        100.00%


int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if (t->singleton) {
		DMERR("%s: target type %s must appear alone in table",
		      dm_device_name(t->md), t->targets->type->name);
		return -EINVAL;
	}

	BUG_ON(t->num_targets >= t->num_allocated);

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
		return -EINVAL;
	}

	if (dm_target_needs_singleton(tgt->type)) {
		if (t->num_targets) {
			tgt->error = "singleton target type must appear alone in table";
			goto bad;
		}
		t->singleton = true;
	}

	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
		tgt->error = "target type may not be included in a read-only table";
		goto bad;
	}

	if (t->immutable_target_type) {
		if (t->immutable_target_type != tgt->type) {
			tgt->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
	} else if (dm_target_is_immutable(tgt->type)) {
		if (t->num_targets) {
			tgt->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
		t->immutable_target_type = tgt->type;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	if (!tgt->num_discard_bios && tgt->discards_supported)
		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
		       dm_device_name(t->md), type);

	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     179     37.61%   1        5.56%
Alasdair G. Kergon           158     33.19%   6        33.33%
Joe Thornber                 44      9.24%    3        16.67%
Andrew Morton                28      5.88%    2        11.11%
tang.junhui                  27      5.67%    1        5.56%
Mike Snitzer                 24      5.04%    3        16.67%
Mikulas Patocka              8       1.68%    1        5.56%
Kevin Corry                  8       1.68%    1        5.56%
Total                        476     100.00%  18       100.00%

/*
 * Target argument parsing helpers.
 */
static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
			     unsigned *value, char **error, unsigned grouped)
{
	const char *arg_str = dm_shift_arg(arg_set);
	char dummy;

	if (!arg_str ||
	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
	    (*value < arg->min) ||
	    (*value > arg->max) ||
	    (grouped && arg_set->argc < *value)) {
		*error = arg->error;
		return -EINVAL;
	}

	return 0;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 100     93.46%   1        50.00%
Mikulas Patocka              7       6.54%    1        50.00%
Total                        107     100.00%  2        100.00%
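The sscanf(arg_str, "%u%c", ...) != 1 idiom enforces that the argument is exactly one unsigned integer: trailing characters make sscanf() return 2, and a non-numeric string makes it return 0 (or EOF). A userspace sketch:

#include <stdio.h>

/* Returns 0 and stores the value only for a clean unsigned decimal. */
static int parse_unsigned(const char *s, unsigned *value)
{
	char dummy;

	if (sscanf(s, "%u%c", value, &dummy) != 1)
		return -1;
	return 0;
}

int main(void)
{
	unsigned v;

	printf("'42'  -> %d\n", parse_unsigned("42", &v));	/* 0: accepted */
	printf("'42x' -> %d\n", parse_unsigned("42x", &v));	/* -1: trailing char */
	printf("'x42' -> %d\n", parse_unsigned("x42", &v));	/* -1: not a number */
	return 0;
}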


int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 0);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 38      100.00%  1        100.00%
Total                        38      100.00%  1        100.00%

EXPORT_SYMBOL(dm_read_arg);
int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 1);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 38      100.00%  1        100.00%
Total                        38      100.00%  1        100.00%

EXPORT_SYMBOL(dm_read_arg_group);
const char *dm_shift_arg(struct dm_arg_set *as)
{
	char *r;

	if (as->argc) {
		as->argc--;
		r = *as->argv;
		as->argv++;
		return r;
	}

	return NULL;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 47      100.00%  1        100.00%
Total                        47      100.00%  1        100.00%

EXPORT_SYMBOL(dm_shift_arg);
void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
{
	BUG_ON(as->argc < num_args);
	as->argc -= num_args;
	as->argv += num_args;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 34      100.00%  1        100.00%
Total                        34      100.00%  1        100.00%

EXPORT_SYMBOL(dm_consume_args);
static bool __table_type_bio_based(unsigned table_type)
{
	return (table_type == DM_TYPE_BIO_BASED ||
		table_type == DM_TYPE_DAX_BIO_BASED);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Toshi Kani                   20      100.00%  1        100.00%
Total                        20      100.00%  1        100.00%


static bool __table_type_request_based(unsigned table_type)
{
	return (table_type == DM_TYPE_REQUEST_BASED ||
		table_type == DM_TYPE_MQ_REQUEST_BASED);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Jun'ichi Nomura              20      100.00%  1        100.00%
Total                        20      100.00%  1        100.00%


void dm_table_set_type(struct dm_table *t, unsigned type)
{
	t->type = type;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 19      100.00%  1        100.00%
Total                        19      100.00%  1        100.00%

EXPORT_SYMBOL_GPL(dm_table_set_type);
static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
			       sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_dax(q);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Toshi Kani                   46      100.00%  1        100.00%
Total                        46      100.00%  1        100.00%


static bool dm_table_supports_dax(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	/* Ensure that all targets support DAX. */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->type->direct_access)
			return false;

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, device_supports_dax, NULL))
			return false;
	}

	return true;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Toshi Kani                   84      100.00%  1        100.00%
Total                        84      100.00%  1        100.00%


static int dm_table_determine_type(struct dm_table *t)
{
	unsigned i;
	unsigned bio_based = 0, request_based = 0, hybrid = 0;
	unsigned sq_count = 0, mq_count = 0;
	struct dm_target *tgt;
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	unsigned live_md_type = dm_get_md_type(t->md);

	if (t->type != DM_TYPE_NONE) {
		/* target already set the table's type */
		if (t->type == DM_TYPE_BIO_BASED)
			return 0;
		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
		goto verify_rq_based;
	}

	for (i = 0; i < t->num_targets; i++) {
		tgt = t->targets + i;
		if (dm_target_hybrid(tgt))
			hybrid = 1;
		else if (dm_target_request_based(tgt))
			request_based = 1;
		else
			bio_based = 1;

		if (bio_based && request_based) {
			DMWARN("Inconsistent table: different target types"
			       " can't be mixed up");
			return -EINVAL;
		}
	}

	if (hybrid && !bio_based && !request_based) {
		/*
		 * The targets can work either way.
		 * Determine the type from the live device.
		 * Default to bio-based if device is new.
		 */
		if (__table_type_request_based(live_md_type))
			request_based = 1;
		else
			bio_based = 1;
	}

	if (bio_based) {
		/* We must use this table as bio-based */
		t->type = DM_TYPE_BIO_BASED;
		if (dm_table_supports_dax(t) ||
		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED))
			t->type = DM_TYPE_DAX_BIO_BASED;
		return 0;
	}

	BUG_ON(!request_based); /* No targets in this table */

	/*
	 * The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
	 * having a compatible target use dm_table_set_type.
	 */
	t->type = DM_TYPE_REQUEST_BASED;

verify_rq_based:
	/*
	 * Request-based dm supports only tables that have a single target now.
	 * To support multiple targets, request splitting support is needed,
	 * and that needs lots of changes in the block-layer.
	 * (e.g. request completion process for partial completion.)
	 */
	if (t->num_targets > 1) {
		DMWARN("Request-based dm doesn't support multiple targets yet");
		return -EINVAL;
	}

	if (list_empty(devices)) {
		int srcu_idx;
		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);

		/* inherit live table's type and all_blk_mq */
		if (live_table) {
			t->type = live_table->type;
			t->all_blk_mq = live_table->all_blk_mq;
		}
		dm_put_live_table(t->md, srcu_idx);
		return 0;
	}

	/* Non-request-stackable devices can't be used for request-based dm */
	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);

		if (!blk_queue_stackable(q)) {
			DMERR("table load rejected: including"
			      " non-request-stackable devices");
			return -EINVAL;
		}

		if (q->mq_ops)
			mq_count++;
		else
			sq_count++;
	}
	if (sq_count && mq_count) {
		DMERR("table load rejected: not all devices are blk-mq request-stackable");
		return -EINVAL;
	}
	t->all_blk_mq = mq_count > 0;

	if (t->type == DM_TYPE_MQ_REQUEST_BASED && !t->all_blk_mq) {
		DMERR("table load rejected: all devices are not blk-mq request-stackable");
		return -EINVAL;
	}

	return 0;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 231     51.56%   6        50.00%
Kiyoshi Ueda                 147     32.81%   1        8.33%
Bart Van Assche              44      9.82%    2        16.67%
Toshi Kani                   22      4.91%    1        8.33%
Jun'ichi Nomura              3       0.67%    1        8.33%
Will Drewry                  1       0.22%    1        8.33%
Total                        448     100.00%  12       100.00%


unsigned dm_table_get_type(struct dm_table *t)
{
	return t->type;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Kiyoshi Ueda                 15      100.00%  1        100.00%
Total                        15      100.00%  1        100.00%


struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
	return t->immutable_target_type;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alasdair G. Kergon           17      100.00%  1        100.00%
Total                        17      100.00%  1        100.00%


struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
{
	/* Immutable target is implicitly a singleton */
	if (t->num_targets > 1 ||
	    !dm_target_is_immutable(t->targets[0].type))
		return NULL;

	return t->targets;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 42      100.00%  1        100.00%
Total                        42      100.00%  1        100.00%


struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{
	struct dm_target *uninitialized_var(ti);
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);
		if (dm_target_is_wildcard(ti->type))
			return ti;
	}

	return NULL;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 61      100.00%  1        100.00%
Total                        61      100.00%  1        100.00%


bool dm_table_bio_based(struct dm_table *t)
{
	return __table_type_bio_based(dm_table_get_type(t));
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Toshi Kani                   19      100.00%  1        100.00%
Total                        19      100.00%  1        100.00%


bool dm_table_request_based(struct dm_table *t)
{
	return __table_type_request_based(dm_table_get_type(t));
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Kiyoshi Ueda                 13      68.42%   1        33.33%
Jun'ichi Nomura              4       21.05%   1        33.33%
Mike Snitzer                 2       10.53%   1        33.33%
Total                        19      100.00%  3        100.00%


bool dm_table_all_blk_mq_devices(struct dm_table *t)
{
	return t->all_blk_mq;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 13      86.67%   2        66.67%
Kiyoshi Ueda                 2       13.33%   1        33.33%
Total                        15      100.00%  3        100.00%


static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
	unsigned type = dm_table_get_type(t);
	unsigned per_io_data_size = 0;
	struct dm_target *tgt;
	unsigned i;

	if (unlikely(type == DM_TYPE_NONE)) {
		DMWARN("no table type is set, can't allocate mempools");
		return -EINVAL;
	}

	if (__table_type_bio_based(type))
		for (i = 0; i < t->num_targets; i++) {
			tgt = t->targets + i;
			per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
		}

	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
					   per_io_data_size);
	if (!t->mempools)
		return -ENOMEM;

	return 0;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 45      34.35%   4        44.44%
Mikulas Patocka              40      30.53%   2        22.22%
Kiyoshi Ueda                 40      30.53%   1        11.11%
Toshi Kani                   3       2.29%    1        11.11%
Martin K. Petersen           3       2.29%    1        11.11%
Total                        131     100.00%  9        100.00%


void dm_table_free_md_mempools(struct dm_table *t)
{
	dm_free_md_mempools(t->mempools);
	t->mempools = NULL;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Kiyoshi Ueda                 23      100.00%  1        100.00%
Total                        23      100.00%  1        100.00%


struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
	return t->mempools;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Kiyoshi Ueda                 17      100.00%  1        100.00%
Total                        17      100.00%  1        100.00%


static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     148     97.37%   2        50.00%
Joe Thornber                 4       2.63%    2        50.00%
Total                        152     100.00%  4        100.00%

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     85      92.39%   1        25.00%
Joe Thornber                 5       5.43%    2        50.00%
Will Drewry                  2       2.17%    1        25.00%
Total                        92      100.00%  4        100.00%
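So the depth is 1 + ceil(log_CHILDREN_PER_NODE(leaf_nodes)) with leaf_nodes = ceil(num_targets / KEYS_PER_NODE). A userspace sketch of the sizing arithmetic, assuming 8 keys and 9 children per node (64-byte cache line, 8-byte sector_t):

#include <stdio.h>

#define KEYS_PER_NODE 8			/* assumed: 64-byte cache line, 8-byte sector_t */
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

static unsigned int div_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

static unsigned int int_log(unsigned int n, unsigned int base)
{
	unsigned int result = 0;

	while (n > 1) {
		n = div_up(n, base);
		result++;
	}
	return result;
}

int main(void)
{
	unsigned int num_targets[] = { 1, 8, 9, 72, 73 };
	int i;

	for (i = 0; i < 5; i++) {
		unsigned int leaves = div_up(num_targets[i], KEYS_PER_NODE);
		unsigned int depth = 1 + int_log(leaves, CHILDREN_PER_NODE);

		printf("%3u targets -> %2u leaf nodes, depth %u\n",
		       num_targets[i], leaves, depth);
	}
	return 0;	/* 1 and 8 targets give depth 1; 9 and 72 give 2; 73 gives 3 */
}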


static bool integrity_profile_exists(struct gendisk *disk)
{
	return !!blk_get_integrity(disk);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Martin K. Petersen           19      100.00%  1        100.00%
Total                        19      100.00%  1        100.00%

/*
 * Get a disk whose integrity profile reflects the table's profile.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
{
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_dev_internal *dd = NULL;
	struct gendisk *prev_disk = NULL, *template_disk = NULL;

	list_for_each_entry(dd, devices, list) {
		template_disk = dd->dm_dev->bdev->bd_disk;
		if (!integrity_profile_exists(template_disk))
			goto no_integrity;
		else if (prev_disk &&
			 blk_integrity_compare(prev_disk, template_disk) < 0)
			goto no_integrity;
		prev_disk = template_disk;
	}

	return template_disk;

no_integrity:
	if (prev_disk)
		DMWARN("%s: integrity not set: %s and %s profile mismatch",
		       dm_device_name(t->md),
		       prev_disk->disk_name,
		       template_disk->disk_name);
	return NULL;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 89      70.63%   1        25.00%
Will Drewry                  35      27.78%   1        25.00%
Benjamin Marzinski           1       0.79%    1        25.00%
Martin K. Petersen           1       0.79%    1        25.00%
Total                        126     100.00%  4        100.00%

/*
 * Register the mapped device for blk_integrity support if the
 * underlying devices have an integrity profile.  But all devices may
 * not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity
 * profile).  Consequently, stacked DM devices force a 2 stage integrity
 * profile validation: First pass during table load, final pass during
 * resume.
 */
static int dm_table_register_integrity(struct dm_table *t)
{
	struct mapped_device *md = t->md;
	struct gendisk *template_disk = NULL;

	template_disk = dm_table_get_integrity_disk(t);
	if (!template_disk)
		return 0;

	if (!integrity_profile_exists(dm_disk(md))) {
		t->integrity_supported = true;
		/*
		 * Register integrity profile during table load; we can do
		 * this because the final profile must match during resume.
		 */
		blk_integrity_register(dm_disk(md),
				       blk_get_integrity(template_disk));
		return 0;
	}

	/*
	 * If DM device already has an initialized integrity
	 * profile the new profile should not conflict.
	 */
	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
		DMWARN("%s: conflict with existing integrity profile: "
		       "%s profile mismatch",
		       dm_device_name(t->md),
		       template_disk->disk_name);
		return 1;
	}

	/* Preserve existing integrity profile */
	t->integrity_supported = true;

	return 0;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 84      67.20%   2        40.00%
Martin K. Petersen           26      20.80%   2        40.00%
Will Drewry                  15      12.00%   1        20.00%
Total                        125     100.00%  5        100.00%

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
	int r;

	r = dm_table_determine_type(t);
	if (r) {
		DMERR("unable to determine table type");
		return r;
	}

	r = dm_table_build_index(t);
	if (r) {
		DMERR("unable to build btrees");
		return r;
	}

	r = dm_table_register_integrity(t);
	if (r) {
		DMERR("could not register integrity profile.");
		return r;
	}

	r = dm_table_alloc_md_mempools(t, t->md);
	if (r)
		DMERR("unable to allocate mempools");

	return r;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Will Drewry                  92      92.93%   1        25.00%
Mike Snitzer                 6       6.06%    2        50.00%
Martin K. Petersen           1       1.01%    1        25.00%
Total                        99      100.00%  4        100.00%

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Andrew Morton                46      95.83%   1        50.00%
Arjan van de Ven             2       4.17%    1        50.00%
Total                        48      100.00%  2        100.00%


void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());
	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Andrew Morton                26      59.09%   2        50.00%
Alan Cox                     16      36.36%   1        25.00%
Arjan van de Ven             2       4.55%    1        25.00%
Total                        44      100.00%  4        100.00%

EXPORT_SYMBOL(dm_table_event);
sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     32      100.00%  1        100.00%
Total                        32      100.00%  1        100.00%

EXPORT_SYMBOL(dm_table_get_size);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     32      94.12%   1        33.33%
Joe Thornber                 1       2.94%    1        33.33%
Milan Broz                   1       2.94%    1        33.33%
Total                        34      100.00%  3        100.00%

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     105     99.06%   1        50.00%
Joe Thornber                 1       0.94%    1        50.00%
Total                        106     100.00%  2        100.00%
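At the leaf level the keys are the table's highs[] values, so the scan selects the first entry whose last mapped sector is >= the requested sector. A one-level userspace illustration of that lookup rule (the three-target layout below is invented for the example):

#include <stdio.h>

int main(void)
{
	/* highs[i] is the last sector mapped by target i: three targets
	 * covering sectors 0-99, 100-499 and 500-999. */
	unsigned long long highs[] = { 99, 499, 999 };
	int ntargets = 3;
	unsigned long long sectors[] = { 0, 99, 100, 500, 999 };
	int i, k;

	for (i = 0; i < 5; i++) {
		for (k = 0; k < ntargets - 1 && highs[k] < sectors[i]; k++)
			;	/* first key >= sector wins */
		printf("sector %4llu -> target %d\n", sectors[i], k);
	}
	return 0;
}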


static int count_device(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{
	unsigned *num_devices = data;

	(*num_devices)++;

	return 0;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 41      100.00%  1        100.00%
Total                        41      100.00%  1        100.00%

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *table)
{
	struct dm_target *uninitialized_var(ti);
	unsigned i = 0, num_devices = 0;

	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		if (!ti->type->iterate_devices)
			return false;

		ti->type->iterate_devices(ti, count_device, &num_devices);
		if (num_devices)
			return false;
	}

	return true;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 84      100.00%  1        100.00%
Total                        84      100.00%  1        100.00%

/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits)
{
	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i = 0;

	blk_set_stacking_limits(limits);

	while (i < dm_table_get_num_targets(table)) {
		blk_set_stacking_limits(&ti_limits);

		ti = dm_table_get_target(table, i++);

		if (!ti->type->iterate_devices)
			goto combine_limits;

		/*
		 * Combine queue limits of all the devices this target uses.
		 */
		ti->type->iterate_devices(ti, dm_set_device_limits,
					  &ti_limits);

		/* Set I/O hints portion of queue limits */
		if (ti->type->io_hints)
			ti->type->io_hints(ti, &ti_limits);

		/*
		 * Check each device area is consistent with the target's
		 * overall queue limits.
		 */
		if (ti->type->iterate_devices(ti, device_area_is_invalid,
					      &ti_limits))
			return -EINVAL;

combine_limits:
		/*
		 * Merge this target's queue limits into the overall limits
		 * for the table.
		 */
		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
			DMWARN("%s: adding target device "
			       "(start sect %llu len %llu) "
			       "caused an alignment inconsistency",
			       dm_device_name(table->md),
			       (unsigned long long) ti->begin,
			       (unsigned long long) ti->len);
	}

	return validate_hardware_logical_block_alignment(table, limits);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 185     97.37%   2        40.00%
Martin K. Petersen           4       2.11%    2        40.00%
Mikulas Patocka              1       0.53%    1        20.00%
Total                        190     100.00%  5        100.00%

/*
 * Verify that all devices have an integrity profile that matches the
 * DM device's registered integrity profile.  If the profiles don't
 * match then unregister the DM device's integrity profile.
 */
static void dm_table_verify_integrity(struct dm_table *t)
{
	struct gendisk *template_disk = NULL;

	if (t->integrity_supported) {
		/*
		 * Verify that the original integrity profile
		 * matches all the devices in this table.
		 */
		template_disk = dm_table_get_integrity_disk(t);
		if (template_disk &&
		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
			return;
	}

	if (integrity_profile_exists(dm_disk(t->md))) {
		DMWARN("%s: unable to establish an integrity profile",
		       dm_device_name(t->md));
		blk_integrity_unregister(dm_disk(t->md));
	}
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Martin K. Petersen           64      71.91%   2        50.00%
Mike Snitzer                 25      28.09%   2        50.00%
Total                        89      100.00%  4        100.00%


static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	unsigned long flush = (unsigned long) data;
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && (q->queue_flags & flush);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 56      94.92%   1        50.00%
Jens Axboe                   3       5.08%    1        50.00%
Total                        59      100.00%  2        100.00%


static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
{
	struct dm_target *ti;
	unsigned i = 0;

	/*
	 * Require at least one underlying device to support flushes.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting flushes must provide.
	 */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->num_flush_bios)
			continue;

		if (ti->flush_supported)
			return true;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
			return true;
	}

	return false;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 78      82.11%   1        20.00%
Joe Thornber                 8       8.42%    1        20.00%
Jens Axboe                   5       5.26%    1        20.00%
Joe Perches                  3       3.16%    1        20.00%
Alasdair G. Kergon           1       1.05%    1        20.00%
Total                        95      100.00%  5        100.00%


static bool dm_table_discard_zeroes_data(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	/* Ensure that all targets support discard_zeroes_data. */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (ti->discard_zeroes_data_unsupported)
			return false;
	}

	return true;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Milan Broz                   53      96.36%   1        50.00%
Joe Perches                  2       3.64%    1        50.00%
Total                        55      100.00%  2        100.00%


static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_nonrot(q);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mandeep Singh Baines         46      100.00%  1        100.00%
Total                        46      100.00%  1        100.00%


static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !blk_queue_add_random(q);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Milan Broz                   47      100.00%  1        100.00%
Total                        47      100.00%  1        100.00%


static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
				   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Jeff Moyer                   52      100.00%  1        100.00%
Total                        52      100.00%  1        100.00%


static bool dm_table_all_devices_attribute(struct dm_table *t,
					   iterate_devices_callout_fn func)
{
	struct dm_target *ti;
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, func, NULL))
			return false;
	}

	return true;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mandeep Singh Baines         67      90.54%   1        33.33%
Milan Broz                   5       6.76%    1        33.33%
Joe Perches                  2       2.70%    1        33.33%
Total                        74      100.00%  3        100.00%


static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
					 sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !q->limits.max_write_same_sectors;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 48      100.00%  1        100.00%
Total                        48      100.00%  1        100.00%


static bool dm_table_supports_write_same(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->num_write_same_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
			return false;
	}

	return true;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 79      98.75%   1        50.00%
Alasdair G. Kergon           1       1.25%    1        50.00%
Total                        80      100.00%  2        100.00%


static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_discard(q);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mikulas Patocka              46      100.00%  1        100.00%
Total                        46      100.00%  1        100.00%


static bool dm_table_supports_discards(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	/*
	 * Unless any target used by the table set discards_supported,
	 * require at least one underlying device to support discards.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting discard selectively must provide.
	 */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->num_discard_bios)
			continue;

		if (ti->discards_supported)
			return true;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
			return true;
	}

	return false;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mikulas Patocka              84      96.55%   1        50.00%
Joe Perches                  3       3.45%    1        50.00%
Total                        87      100.00%  2        100.00%


void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits)
{
	bool wc = false, fua = false;

	/*
	 * Copy table's limits to the DM device's request_queue
	 */
	q->limits = *limits;

	if (!dm_table_supports_discards(t))
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
		wc = true;
		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
			fua = true;
	}
	blk_queue_write_cache(q, wc, fua);

	if (!dm_table_discard_zeroes_data(t))
		q->limits.discard_zeroes_data = 0;

	/* Ensure that all underlying devices are non-rotational. */
	if (dm_table_all_devices_attribute(t, device_is_nonrot))
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	else
		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);

	if (!dm_table_supports_write_same(t))
		q->limits.max_write_same_sectors = 0;

	if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
		queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);

	dm_table_verify_integrity(t);

	/*
	 * Determine whether or not this queue's I/O timings contribute
	 * to the entropy pool.  Only request-based targets use this.
	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
	 * have it set.
	 */
	if (blk_queue_add_random(q) &&
	    dm_table_all_devices_attribute(t, device_is_not_random))
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);

	/*
	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
	 * visible to other CPUs because, once the flag is set, incoming bios
	 * are processed by request-based dm, which refers to the queue
	 * settings.
	 * Until the flag is set, bios are passed to bio-based dm and queued to
	 * md->deferred where queue settings are not needed yet.
	 * Those bios are passed to request-based dm at the resume time.
	 */
	smp_mb();
	if (dm_table_request_based(t))
		queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 72      31.17%   6        33.33%
Milan Broz                   42      18.18%   2        11.11%
Jens Axboe                   27      11.69%   2        11.11%
Jeff Moyer                   24      10.39%   1        5.56%
Mandeep Singh Baines         21      9.09%    1        5.56%
Kiyoshi Ueda                 18      7.79%    1        5.56%
Alan Cox                     18      7.79%    1        5.56%
Martin K. Petersen           5       2.16%    2        11.11%
Neil Brown                   2       0.87%    1        5.56%
Vasily Averin                2       0.87%    1        5.56%
Total                        231     100.00%  18       100.00%


unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     16      100.00%  1        100.00%
Total                        16      100.00%  1        100.00%


struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     18      100.00%  1        100.00%
Total                        18      100.00%  1        100.00%


fmode_t dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alan Cox                     14      93.33%   1        50.00%
Al Viro                      1       6.67%    1        50.00%
Total                        15      100.00%  2        100.00%

EXPORT_SYMBOL(dm_table_get_mode);

enum suspend_mode {
	PRESUSPEND,
	PRESUSPEND_UNDO,
	POSTSUSPEND,
};

static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		switch (mode) {
		case PRESUSPEND:
			if (ti->type->presuspend)
				ti->type->presuspend(ti);
			break;
		case PRESUSPEND_UNDO:
			if (ti->type->presuspend_undo)
				ti->type->presuspend_undo(ti);
			break;
		case POSTSUSPEND:
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
			break;
		}
		ti++;
	}
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 39      35.45%   1        33.33%
Joe Thornber                 37      33.64%   1        33.33%
Alasdair G. Kergon           34      30.91%   1        33.33%
Total                        110     100.00%  3        100.00%


void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alasdair G. Kergon           19      82.61%   2        66.67%
Mike Snitzer                 4       17.39%   1        33.33%
Total                        23      100.00%  3        100.00%


void dm_table_presuspend_undo_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND_UNDO);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 20      86.96%   1        33.33%
Alasdair G. Kergon           2       8.70%    1        33.33%
Adrian Bunk                  1       4.35%    1        33.33%
Total                        23      100.00%  3        100.00%


void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, POSTSUSPEND);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Alasdair G. Kergon           20      86.96%   2        40.00%
Adrian Bunk                  1       4.35%    1        20.00%
Mike Snitzer                 1       4.35%    1        20.00%
Joe Thornber                 1       4.35%    1        20.00%
Total                        23      100.00%  5        100.00%


int dm_table_resume_targets(struct dm_table *t)
{
	int i, r = 0;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r) {
			DMERR("%s: %s: preresume failed, error = %d",
			      dm_device_name(t->md), ti->type->name, r);
			return r;
		}
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Milan Broz                   64      44.76%   1        33.33%
Joe Thornber                 57      39.86%   1        33.33%
Mike Snitzer                 22      15.38%   1        33.33%
Total                        143     100.00%  3        100.00%


void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
{
	list_add(&cb->list, &t->target_callbacks);
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Neil Brown                   28      100.00%  1        100.00%
Total                        28      100.00%  1        100.00%

EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_target_callbacks *cb;
	int r = 0;

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
		char b[BDEVNAME_SIZE];

		if (likely(q))
			r |= bdi_congested(q->backing_dev_info, bdi_bits);
		else
			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev->bdev, b));
	}

	list_for_each_entry(cb, &t->target_callbacks, list)
		if (cb->congested_fn)
			r |= cb->congested_fn(cb, bdi_bits);

	return r;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Andrew Morton                52      37.68%   1        14.29%
Alasdair G. Kergon           36      26.09%   1        14.29%
Neil Brown                   30      21.74%   1        14.29%
Paul Jimenez                 14      10.14%   1        14.29%
Benjamin Marzinski           2       1.45%    1        14.29%
Jens Axboe                   2       1.45%    1        14.29%
Mikulas Patocka              2       1.45%    1        14.29%
Total                        138     100.00%  7        100.00%


struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	return t->md;
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Anderson                17      100.00%  1        100.00%
Total                        17      100.00%  1        100.00%

EXPORT_SYMBOL(dm_table_get_md);
void dm_table_run_md_queue_async(struct dm_table *t)
{
	struct mapped_device *md;
	struct request_queue *queue;
	unsigned long flags;

	if (!dm_table_request_based(t))
		return;

	md = dm_table_get_md(t);
	queue = dm_get_md_queue(md);
	if (queue) {
		if (queue->mq_ops)
			blk_mq_run_hw_queues(queue, true);
		else {
			spin_lock_irqsave(queue->queue_lock, flags);
			blk_run_queue_async(queue);
			spin_unlock_irqrestore(queue->queue_lock, flags);
		}
	}
}

Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 92      100.00%  2        100.00%
Total                        92      100.00%  2        100.00%

EXPORT_SYMBOL(dm_table_run_md_queue_async);

Overall Contributors

Person                       Tokens  Prop     Commits  CommitProp
Mike Snitzer                 2312    31.69%   36       27.48%
Alan Cox                     1964    26.92%   2        1.53%
Alasdair G. Kergon           363     4.98%    12       9.16%
Kiyoshi Ueda                 290     3.97%    1        0.76%
Milan Broz                   283     3.88%    5        3.82%
Mikulas Patocka              269     3.69%    11       8.40%
Andrew Morton                217     2.97%    11       8.40%
Toshi Kani                   194     2.66%    1        0.76%
Joe Thornber                 186     2.55%    9        6.87%
Will Drewry                  147     2.01%    1        0.76%
Kevin Corry                  146     2.00%    2        1.53%
Martin K. Petersen           137     1.88%    6        4.58%
Mandeep Singh Baines         134     1.84%    1        0.76%
Benjamin Marzinski           132     1.81%    1        0.76%
Neil Brown                   80      1.10%    3        2.29%
Jeff Moyer                   76      1.04%    1        0.76%
DingXiang                    66      0.90%    1        0.76%
Jun'ichi Nomura              49      0.67%    3        2.29%
Bart Van Assche              44      0.60%    2        1.53%
Mike Anderson                42      0.58%    2        1.53%
Jens Axboe                   39      0.53%    3        2.29%
tang.junhui                  27      0.37%    1        0.76%
Paul Jimenez                 22      0.30%    1        0.76%
David Teigland               15      0.21%    1        0.76%
Al Viro                      12      0.16%    2        1.53%
Joe Perches                  11      0.15%    2        1.53%
Jonathan E Brassow           9       0.12%    1        0.76%
Arjan van de Ven             8       0.11%    1        0.76%
André Goddard Rosa           6       0.08%    1        0.76%
Dan Ehrenberg                6       0.08%    1        0.76%
Eric Sesterhenn / Snakebyte  3       0.04%    1        0.76%
Adrian Bunk                  2       0.03%    1        0.76%
Vasily Averin                2       0.03%    1        0.76%
Dmitriy Monakhov             1       0.01%    1        0.76%
Arun Sharma                  1       0.01%    1        0.76%
Bryn M. Reeves               1       0.01%    1        0.76%
Total                        7296    100.00%  131      100.00%