Release 4.18: block/bio.c
/*
* Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*
*/
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <trace/events/block.h>
#include "blk.h"
/*
* Test patch to inline a certain number of bi_io_vec's inside the bio
* itself, to shrink a bio data allocation from two mempool calls to one
*/
#define BIO_INLINE_VECS 4
/*
* if you change this list, also change bvec_alloc or things will
* break badly! cannot be bigger than what you can fit into an
* unsigned short
*/
#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
};
#undef BV
/*
* fs_bio_set is the bio_set containing bio and iovec memory pools used by
* IO code that does not need private memory pools.
*/
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);
/*
* Our slab pool management
*/
struct bio_slab {
struct kmem_cache *slab;
unsigned int slab_ref;
unsigned int slab_size;
char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;
static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
unsigned int sz = sizeof(struct bio) + extra_size;
struct kmem_cache *slab = NULL;
struct bio_slab *bslab, *new_bio_slabs;
unsigned int new_bio_slab_max;
unsigned int i, entry = -1;
mutex_lock(&bio_slab_lock);
i = 0;
while (i < bio_slab_nr) {
bslab = &bio_slabs[i];
if (!bslab->slab && entry == -1)
entry = i;
else if (bslab->slab_size == sz) {
slab = bslab->slab;
bslab->slab_ref++;
break;
}
i++;
}
if (slab)
goto out_unlock;
if (bio_slab_nr == bio_slab_max && entry == -1) {
new_bio_slab_max = bio_slab_max << 1;
new_bio_slabs = krealloc(bio_slabs,
new_bio_slab_max * sizeof(struct bio_slab),
GFP_KERNEL);
if (!new_bio_slabs)
goto out_unlock;
bio_slab_max = new_bio_slab_max;
bio_slabs = new_bio_slabs;
}
if (entry == -1)
entry = bio_slab_nr++;
bslab = &bio_slabs[entry];
snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
SLAB_HWCACHE_ALIGN, NULL);
if (!slab)
goto out_unlock;
bslab->slab = slab;
bslab->slab_ref = 1;
bslab->slab_size = sz;
out_unlock:
mutex_unlock(&bio_slab_lock);
return slab;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 247 | 91.14% | 1 | 20.00% |
Anna Leuschner | 12 | 4.43% | 1 | 20.00% |
Alexey Khoroshilov | 9 | 3.32% | 1 | 20.00% |
Thiago Farina | 2 | 0.74% | 1 | 20.00% |
Mikulas Patocka | 1 | 0.37% | 1 | 20.00% |
Total | 271 | 100.00% | 5 | 100.00% |
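bioset_init() resolves its backing slab through this function, so two bio_sets that request the same extra size end up sharing a single kmem_cache and merely bumping slab_ref; bio_put_slab() below drops that reference. A minimal sketch of the sharing, assuming two hypothetical drivers (all names invented) asking for the same front padding:

/* Hedged sketch, not kernel code: both bio_sets request the same
 * front_pad, so both resolve to the same object size inside
 * bio_find_or_create_slab(), and the second caller only takes a
 * reference on the existing "bio-N" cache.
 */
static struct bio_set first_set, second_set;

static int __init slab_share_demo(void)
{
	int ret;

	ret = bioset_init(&first_set, 4, 16, BIOSET_NEED_BVECS);
	if (ret)
		return ret;
	ret = bioset_init(&second_set, 4, 16, BIOSET_NEED_BVECS);
	if (ret)
		bioset_exit(&first_set);	/* drops the slab reference */
	return ret;
}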
static void bio_put_slab(struct bio_set *bs)
{
struct bio_slab *bslab = NULL;
unsigned int i;
mutex_lock(&bio_slab_lock);
for (i = 0; i < bio_slab_nr; i++) {
if (bs->bio_slab == bio_slabs[i].slab) {
bslab = &bio_slabs[i];
break;
}
}
if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
goto out;
WARN_ON(!bslab->slab_ref);
if (--bslab->slab_ref)
goto out;
kmem_cache_destroy(bslab->slab);
bslab->slab = NULL;
out:
mutex_unlock(&bio_slab_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 117 | 97.50% | 1 | 50.00% |
Andi Kleen | 3 | 2.50% | 1 | 50.00% |
Total | 120 | 100.00% | 2 | 100.00% |
unsigned int bvec_nr_vecs(unsigned short idx)
{
return bvec_slabs[idx].nr_vecs;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin K. Petersen | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
if (!idx)
return;
idx--;
BIO_BUG_ON(idx >= BVEC_POOL_NR);
if (idx == BVEC_POOL_MAX) {
mempool_free(bv, pool);
} else {
struct biovec_slab *bvs = bvec_slabs + idx;
kmem_cache_free(bvs->slab, bv);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 53 | 75.71% | 1 | 33.33% |
Christoph Hellwig | 13 | 18.57% | 1 | 33.33% |
Kent Overstreet | 4 | 5.71% | 1 | 33.33% |
Total | 70 | 100.00% | 3 | 100.00% |
struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
mempool_t *pool)
{
struct bio_vec *bvl;
/*
* see the comment near the bvec_slabs definition above!
*/
switch (nr) {
case 1:
*idx = 0;
break;
case 2 ... 4:
*idx = 1;
break;
case 5 ... 16:
*idx = 2;
break;
case 17 ... 64:
*idx = 3;
break;
case 65 ... 128:
*idx = 4;
break;
case 129 ... BIO_MAX_PAGES:
*idx = 5;
break;
default:
return NULL;
}
/*
* idx now points to the pool we want to allocate from. only the
* BVEC_POOL_MAX (BIO_MAX_PAGES) pool is mempool backed.
*/
if (*idx == BVEC_POOL_MAX) {
fallback:
bvl = mempool_alloc(pool, gfp_mask);
} else {
struct biovec_slab *bvs = bvec_slabs + *idx;
gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
/*
* Make this allocation restricted and don't dump info on
* allocation failures, since we'll fallback to the mempool
* in case of failure.
*/
__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
/*
* Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
* is set, retry with the mempool-backed pool.
*/
bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
*idx = BVEC_POOL_MAX;
goto fallback;
}
}
(*idx)++;
return bvl;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 101 | 50.00% | 3 | 27.27% |
Jens Axboe | 79 | 39.11% | 3 | 27.27% |
Christoph Hellwig | 8 | 3.96% | 1 | 9.09% |
Dave Olien | 6 | 2.97% | 1 | 9.09% |
Kent Overstreet | 4 | 1.98% | 1 | 9.09% |
Mel Gorman | 3 | 1.49% | 1 | 9.09% |
Al Viro | 1 | 0.50% | 1 | 9.09% |
Total | 202 | 100.00% | 11 | 100.00% |
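The switch above maps a requested vector count onto one of the bvec_slabs entries, and only the largest pool is mempool backed. A minimal sketch of the index that comes back, assuming a bio_set already initialized with BIOSET_NEED_BVECS:

/* Illustration only: 10 vectors falls in the 5..16 case, so *idx is
 * set to 2 and comes back as 3 after the final increment -- the +1
 * lets zero mean "no bvec pool", which is what bvec_free() undoes
 * before indexing bvec_slabs.
 */
static void bvec_alloc_demo(struct bio_set *bs)
{
	unsigned long idx = 0;
	struct bio_vec *bvl;

	bvl = bvec_alloc(GFP_NOIO, 10, &idx, &bs->bvec_pool);
	if (bvl)
		bvec_free(&bs->bvec_pool, bvl, idx);	/* idx == 3: biovec-16 */
}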
void bio_uninit(struct bio *bio)
{
bio_disassociate_task(bio);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kent Overstreet | 14 | 93.33% | 1 | 50.00% |
Jens Axboe | 1 | 6.67% | 1 | 50.00% |
Total | 15 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(bio_uninit);
static void bio_free(struct bio *bio)
{
struct bio_set *bs = bio->bi_pool;
void *p;
bio_uninit(bio);
if (bs) {
bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
/*
* If we have front padding, adjust the bio pointer before freeing
*/
p = bio;
p -= bs->front_pad;
mempool_free(p, &bs->bio_pool);
} else {
/* Bio was allocated by bio_kmalloc() */
kfree(bio);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kent Overstreet | 30 | 36.59% | 3 | 23.08% |
Jens Axboe | 23 | 28.05% | 3 | 23.08% |
Linus Torvalds | 20 | 24.39% | 3 | 23.08% |
Peter Osterlund | 4 | 4.88% | 1 | 7.69% |
H Hartley Sweeten | 3 | 3.66% | 1 | 7.69% |
Dave Olien | 1 | 1.22% | 1 | 7.69% |
Christoph Hellwig | 1 | 1.22% | 1 | 7.69% |
Total | 82 | 100.00% | 13 | 100.00% |
/*
* Users of this function have their own bio allocation. Subsequently,
* they must remember to pair any call to bio_init() with bio_uninit()
* when IO has completed, or when the bio is released.
*/
void bio_init(struct bio *bio, struct bio_vec *table,
unsigned short max_vecs)
{
memset(bio, 0, sizeof(*bio));
atomic_set(&bio->__bi_remaining, 1);
atomic_set(&bio->__bi_cnt, 1);
bio->bi_io_vec = table;
bio->bi_max_vecs = max_vecs;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 21 | 32.81% | 2 | 25.00% |
Lei Ming | 21 | 32.81% | 1 | 12.50% |
Jens Axboe | 13 | 20.31% | 4 | 50.00% |
Kent Overstreet | 9 | 14.06% | 1 | 12.50% |
Total | 64 | 100.00% | 8 | 100.00% |
EXPORT_SYMBOL(bio_init);
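A minimal sketch of the pairing the comment above demands: an on-stack bio set up with bio_init() and released with bio_uninit() once the synchronous IO completes (the device, page, and sector are the caller's):

static int read_one_page_sync(struct block_device *bdev, struct page *page,
			      sector_t sector)
{
	struct bio bio;
	struct bio_vec bvec;
	int ret;

	bio_init(&bio, &bvec, 1);		/* table of one caller-provided vec */
	bio_set_dev(&bio, bdev);
	bio.bi_iter.bi_sector = sector;
	bio.bi_opf = REQ_OP_READ;
	bio_add_page(&bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(&bio);		/* returns -errno on failure */
	bio_uninit(&bio);			/* pairs with bio_init() */
	return ret;
}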
/**
* bio_reset - reinitialize a bio
* @bio: bio to reset
*
* Description:
* After calling bio_reset(), @bio will be in the same state as a freshly
* allocated bio returned by bio_alloc_bioset() - the only fields that are
* preserved are the ones that are initialized by bio_alloc_bioset(). See
* comment in struct bio.
*/
void bio_reset(struct bio *bio)
{
unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
bio_uninit(bio);
memset(bio, 0, BIO_RESET_BYTES);
bio->bi_flags = flags;
atomic_set(&bio->__bi_remaining, 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kent Overstreet | 53 | 96.36% | 2 | 50.00% |
Jens Axboe | 2 | 3.64% | 2 | 50.00% |
Total | 55 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bio_reset);
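A hedged sketch of reuse via bio_reset(): one bio services two reads in a row. Everything below BIO_RESET_BYTES (device, iterator, op, vcnt) is zeroed and must be refilled, while bi_pool, bi_max_vecs, and the vec table survive the reset:

static int read_two_sectors(struct block_device *bdev, struct page *page)
{
	/* cannot fail: GFP_NOIO implies __GFP_DIRECT_RECLAIM */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int ret;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = 0;
	bio->bi_opf = REQ_OP_READ;
	bio_add_page(bio, page, 512, 0);
	ret = submit_bio_wait(bio);
	if (ret)
		goto out;

	bio_reset(bio);			/* back to freshly-allocated state */
	bio_set_dev(bio, bdev);		/* the reset fields must be */
	bio->bi_iter.bi_sector = 8;	/* set up all over again */
	bio->bi_opf = REQ_OP_READ;
	bio_add_page(bio, page, 512, 512);
	ret = submit_bio_wait(bio);
out:
	bio_put(bio);
	return ret;
}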
static struct bio *__bio_chain_endio(struct bio *bio)
{
struct bio *parent = bio->bi_private;
if (!parent->bi_status)
parent->bi_status = bio->bi_status;
bio_put(bio);
return parent;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 32 | 71.11% | 4 | 80.00% |
Kent Overstreet | 13 | 28.89% | 1 | 20.00% |
Total | 45 | 100.00% | 5 | 100.00% |
static void bio_chain_endio(struct bio *bio)
{
bio_endio(__bio_chain_endio(bio));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 14 | 73.68% | 1 | 50.00% |
Kent Overstreet | 5 | 26.32% | 1 | 50.00% |
Total | 19 | 100.00% | 2 | 100.00% |
/**
* bio_chain - chain bio completions
* @bio: the target bio
* @parent: the @bio's parent bio
*
* The caller won't have a bi_end_io called when @bio completes - instead,
* @parent's bi_end_io won't be called until both @parent and @bio have
* completed; the chained bio will also be freed when it completes.
*
* The caller must not set bi_private or bi_end_io in @bio.
*/
void bio_chain(struct bio *bio, struct bio *parent)
{
BUG_ON(bio->bi_private || bio->bi_end_io);
bio->bi_private = parent;
bio->bi_end_io = bio_chain_endio;
bio_inc_remaining(parent);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kent Overstreet | 42 | 97.67% | 1 | 50.00% |
Jens Axboe | 1 | 2.33% | 1 | 50.00% |
Total | 43 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(bio_chain);
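A hedged sketch of the usual split-and-chain pattern (the sector count and bio_set are the caller's choice): the front half is sent on ahead while the parent is held back until both halves complete.

static void send_front_half(struct bio *bio, unsigned int sectors,
			    struct bio_set *bs)
{
	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

	if (!split)
		return;			/* allocation failed, caller retries */
	bio_chain(split, bio);		/* @bio completes only after @split */
	generic_make_request(split);
	/* the caller continues with the trimmed @bio */
}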
static void bio_alloc_rescue(struct work_struct *work)
{
struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
struct bio *bio;
while (1) {
spin_lock(&bs->rescue_lock);
bio = bio_list_pop(&bs->rescue_list);
spin_unlock(&bs->rescue_lock);
if (!bio)
break;
generic_make_request(bio);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kent Overstreet | 74 | 100.00% | 1 | 100.00% |
Total | 74 | 100.00% | 1 | 100.00% |
static void punt_bios_to_rescuer(struct bio_set *bs)
{
struct bio_list punt, nopunt;
struct bio *bio;
if (WARN_ON_ONCE(!bs->rescue_workqueue))
return;
/*
* In order to guarantee forward progress we must punt only bios that
* were allocated from this bio_set; otherwise, if there was a bio on
* there for a stacking driver higher up in the stack, processing it
* could require allocating bios from this bio_set, and doing that from
* our own rescuer would be bad.
*
* Since bio lists are singly linked, pop them all instead of trying to
* remove from the middle of the list:
*/
bio_list_init(&punt);
bio_list_init(&nopunt);
while ((bio = bio_list_pop(&current->bio_list[0])))
bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
current->bio_list[0] = nopunt;
bio_list_init(&nopunt);
while ((bio = bio_list_pop(&current->bio_list[1])))
bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
current->bio_list[1] = nopunt;
spin_lock(&bs->rescue_lock);
bio_list_merge(&bs->rescue_list, &punt);
spin_unlock(&bs->rescue_lock);
queue_work(bs->rescue_workqueue, &bs->rescue_work);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kent Overstreet | 110 | 62.15% | 1 | 33.33% |
Neil Brown | 67 | 37.85% | 2 | 66.67% |
Total | 177 | 100.00% | 3 | 100.00% |
/**
* bio_alloc_bioset - allocate a bio for I/O
* @gfp_mask: the GFP_* mask given to the slab allocator
* @nr_iovecs: number of iovecs to pre-allocate
* @bs: the bio_set to allocate from.
*
* Description:
* If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
* backed by the @bs's mempool.
*
* When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
* always be able to allocate a bio. This is due to the mempool guarantees.
* To make this work, callers must never allocate more than 1 bio at a time
* from this pool. Callers that need to allocate more than 1 bio must always
* submit the previously allocated bio for IO before attempting to allocate
* a new one. Failure to do so can cause deadlocks under memory pressure.
*
* Note that when running under generic_make_request() (i.e. any block
* driver), bios are not submitted until after you return - see the code in
* generic_make_request() that converts recursion into iteration, to prevent
* stack overflows.
*
* This would normally mean allocating multiple bios under
* generic_make_request() would be susceptible to deadlocks, but we have
* deadlock avoidance code that resubmits any blocked bios from a rescuer
* thread.
*
* However, we do not guarantee forward progress for allocations from other
* mempools. Doing multiple allocations from the same mempool under
* generic_make_request() should be avoided - instead, use bio_set's front_pad
* for per bio allocations.
*
* RETURNS:
* Pointer to new bio on success, NULL on failure.
*/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
struct bio_set *bs)
{
gfp_t saved_gfp = gfp_mask;
unsigned front_pad;
unsigned inline_vecs;
struct bio_vec *bvl = NULL;
struct bio *bio;
void *p;
if (!bs) {
if (nr_iovecs > UIO_MAXIOV)
return NULL;
p = kmalloc(sizeof(struct bio) +
nr_iovecs * sizeof(struct bio_vec),
gfp_mask);
front_pad = 0;
inline_vecs = nr_iovecs;
} else {
/* should not use nobvec bioset for nr_iovecs > 0 */
if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
nr_iovecs > 0))
return NULL;
/*
* generic_make_request() converts recursion to iteration; this
* means if we're running beneath it, any bios we allocate and
* submit will not be submitted (and thus freed) until after we
* return.
*
* This exposes us to a potential deadlock if we allocate
* multiple bios from the same bio_set() while running
* underneath generic_make_request(). If we were to allocate
* multiple bios (say a stacking block driver that was splitting
* bios), we would deadlock if we exhausted the mempool's
* reserve.
*
* We solve this, and guarantee forward progress, with a rescuer
* workqueue per bio_set. If we go to allocate and there are
* bios on current->bio_list, we first try the allocation
* without __GFP_DIRECT_RECLAIM; if that fails, we punt those
* bios we would be blocking to the rescuer workqueue before
* we retry with the original gfp_flags.
*/
if (current->bio_list &&
(!bio_list_empty(&current->bio_list[0]) ||
!bio_list_empty(&current->bio_list[1])) &&
bs->rescue_workqueue)
gfp_mask &= ~__GFP_DIRECT_RECLAIM;
p = mempool_alloc(&bs->bio_pool, gfp_mask);
if (!p && gfp_mask != saved_gfp) {
punt_bios_to_rescuer(bs);
gfp_mask = saved_gfp;
p = mempool_alloc(&bs->bio_pool, gfp_mask);
}
front_pad = bs->front_pad;
inline_vecs = BIO_INLINE_VECS;
}
if (unlikely(!p))
return NULL;
bio = p + front_pad;
bio_init(bio, NULL, 0);
if (nr_iovecs > inline_vecs) {
unsigned long idx = 0;
bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
if (!bvl && gfp_mask != saved_gfp) {
punt_bios_to_rescuer(bs);
gfp_mask = saved_gfp;
bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
}
if (unlikely(!bvl))
goto err_free;
bio->bi_flags |= idx << BVEC_POOL_OFFSET;
} else if (nr_iovecs) {
bvl = bio->bi_inline_vecs;
}
bio->bi_pool = bs;
bio->bi_max_vecs = nr_iovecs;
bio->bi_io_vec = bvl;
return bio;
err_free:
mempool_free(p, &bs->bio_pool);
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kent Overstreet | 169 | 45.19% | 5 | 19.23% |
Jens Axboe | 35 | 9.36% | 4 | 15.38% |
Linus Torvalds | 32 | 8.56% | 3 | 11.54% |
Ingo Molnar | 26 | 6.95% | 1 | 3.85% |
Neil Brown | 22 | 5.88% | 2 | 7.69% |
Andrew Morton | 21 | 5.61% | 2 | 7.69% |
Jun'ichi Nomura | 18 | 4.81% | 1 | 3.85% |
Christoph Hellwig | 12 | 3.21% | 1 | 3.85% |
Dave Olien | 12 | 3.21% | 1 | 3.85% |
Tejun Heo | 11 | 2.94% | 1 | 3.85% |
Subhash Peddamallu | 8 | 2.14% | 1 | 3.85% |
Lei Ming | 4 | 1.07% | 1 | 3.85% |
Mel Gorman | 2 | 0.53% | 1 | 3.85% |
Al Viro | 1 | 0.27% | 1 | 3.85% |
Dan Carpenter | 1 | 0.27% | 1 | 3.85% |
Total | 374 | 100.00% | 26 | 100.00% |
EXPORT_SYMBOL(bio_alloc_bioset);
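A minimal sketch of a private pool, assuming a hypothetical driver: with GFP_NOIO (which includes __GFP_DIRECT_RECLAIM) the allocation cannot fail, per the mempool guarantee described above, as long as only one bio is held at a time.

static struct bio_set demo_bio_set;		/* hypothetical driver pool */

static int __init demo_pool_init(void)
{
	/* 4 reserved bios, no front padding, with bvec pools */
	return bioset_init(&demo_bio_set, 4, 0, BIOSET_NEED_BVECS);
}

static struct bio *demo_get_bio(unsigned int nr_pages)
{
	return bio_alloc_bioset(GFP_NOIO, nr_pages, &demo_bio_set);
}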
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
{
unsigned long flags;
struct bio_vec bv;
struct bvec_iter iter;
__bio_for_each_segment(bv, bio, iter, start) {
char *data = bvec_kmap_irq(&bv, &flags);
memset(data, 0, bv.bv_len);
flush_dcache_page(bv.bv_page);
bvec_kunmap_irq(data, &flags);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dave Olien | 62 | 80.52% | 1 | 33.33% |
Kent Overstreet | 15 | 19.48% | 2 | 66.67% |
Total | 77 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(zero_fill_bio_iter);
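zero_fill_bio() in include/linux/bio.h simply passes the bio's current bi_iter to this helper. A hedged sketch of one use, completing a read with zeroes (for instance, for a region known to be unwritten):

static void complete_read_with_zeroes(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);	/* zero the unprocessed part */
	bio->bi_status = BLK_STS_OK;
	bio_endio(bio);
}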
/**
* bio_put - release a reference to a bio
* @bio: bio to release reference to
*
* Description:
* Put a reference to a &struct bio, either one you have gotten with
* bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
**/
void bio_put(struct bio *bio)
{
if (!bio_flagged(bio, BIO_REFFED))
bio_free(bio);
else {
BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
/*
* last put frees it
*/
if (atomic_dec_and_test(&bio->__bi_cnt))
bio_free(bio);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 30 | 53.57% | 3 | 60.00% |
Jens Axboe | 21 | 37.50% | 1 | 20.00% |
Kent Overstreet | 5 | 8.93% | 1 | 20.00% |
Total | 56 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(bio_put);
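A hedged sketch of the reference pattern bio_put() pairs with: when the completion handler drops the bio, a submitter that still needs it afterwards must take its own reference first (the handler and helper names are hypothetical):

static void demo_end_io(struct bio *bio)
{
	/* ... record bi_status somewhere ... */
	bio_put(bio);			/* the completion's reference */
}

static void demo_submit(struct bio *bio)
{
	bio->bi_end_io = demo_end_io;
	bio_get(bio);			/* sets BIO_REFFED; __bi_cnt is now 2 */
	submit_bio(bio);
	/* even if demo_end_io already ran, our reference keeps @bio valid */
	bio_put(bio);			/* the last put frees it */
}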
inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
blk_recount_segments(q, bio);
return bio->bi_phys_segments;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 35 | 85.37% | 1 | 33.33% |
Jens Axboe | 6 | 14.63% | 2 | 66.67% |
Total | 41 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(bio_phys_segments);
/**
* __bio_clone_fast - clone a bio that shares the original bio's biovec
* @bio: destination bio
* @bio_src: bio to clone
*
* Clone a &bio. Caller will own the returned bio, but not
* the actual data it points to. Reference count of returned
* bio will be one.
*
* Caller must ensure that @bio_src is not freed before @bio.
*/
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
/*
* most users will be overriding ->bi_disk with a new target,
* so we don't set nor calculate new physical/hw segment counts here
*/
bio->bi_disk = bio_src->bi_disk;
bio->bi_partno = bio_src->bi_partno;
bio_set_flag(bio, BIO_CLONED);
if (bio_flagged(bio_src, BIO_THROTTLED))
bio_set_flag(bio, BIO_THROTTLED);
bio->bi_opf = bio_src->bi_opf;
bio->bi_write_hint = bio_src->bi_write_hint;
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
bio_clone_blkcg_association(bio, bio_src);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kent Overstreet | 57 | 53.77% | 1 | 11.11% |
Shaohua Li | 16 | 15.09% | 1 | 11.11% |
Jens Axboe | 14 | 13.21% | 3 | 33.33% |
Michael Lyle | 8 | 7.55% | 1 | 11.11% |
Paolo Valente | 7 | 6.60% | 1 | 11.11% |
Christoph Hellwig | 4 | 3.77% | 2 | 22.22% |
Total | 106 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL(__bio_clone_fast);
/**
* bio_clone_fast - clone a bio that shares the original bio's biovec
* @bio: bio to clone
* @gfp_mask: allocation priority
* @bs: bio_set to allocate from
*
* Like __bio_clone_fast, only also allocates the returned bio
*/
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
struct bio *b;
b = bio_alloc_bioset(gfp_mask, 0, bs);
if (!b)
return NULL;
__bio_clone_fast(b, bio);
if (bio_integrity(bio)) {
int ret;
ret = bio_integrity_clone(b, bio, gfp_mask);
if (ret < 0) {
bio_put(b);
return NULL;
}
}
return b;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kent Overstreet | 93 | 100.00% | 1 | 100.00% |
Total | 93 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(bio_clone_fast);
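A hedged sketch of the stacking-driver use bio_clone_fast() serves: the clone shares the parent's biovec, is redirected at a lower device, and its completion finishes the original (the struct and function names are invented):

struct demo_stack_dev {				/* hypothetical */
	struct block_device *lower_bdev;
	struct bio_set bs;			/* from bioset_init() */
};

static void demo_clone_end_io(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	orig->bi_status = clone->bi_status;
	bio_put(clone);
	bio_endio(orig);
}

static void demo_remap(struct demo_stack_dev *dev, struct bio *bio)
{
	/* cannot fail: GFP_NOIO allocation from a mempool-backed bio_set */
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &dev->bs);

	clone->bi_private = bio;		/* remember the original */
	clone->bi_end_io = demo_clone_end_io;
	bio_set_dev(clone, dev->lower_bdev);
	generic_make_request(clone);
}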
/**
* bio_clone_bioset - clone a bio
* @bio_src: bio to clone
* @gfp_mask: allocation priority
* @bs: bio_set to allocate from
*
* Clone bio. Caller will own the returned bio, but not the actual data it
* points to. Reference count of returned bio will be one.
*/
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
struct bio_set *bs)
{
struct bvec_iter iter;
struct bio_vec bv;
struct bio *bio;
/*
* Pre immutable biovecs, __bio_clone() used to just do a memcpy from
* bio_src->bi_io_vec to bio->bi_io_vec.
*
* We can't do that anymore, because:
*
* - The point of cloning the biovec is to produce a bio with a biovec
* the caller can modify: bi_idx and bi_bvec_done should be 0.
*
* - The original bio could've had more than BIO_MAX_PAGES biovecs; if
* we tried to clone the whole thing bio_alloc_bioset() would fail.
* But the clone should succeed as long as the number of biovecs we
* actually need to allocate is fewer than BIO_MAX_PAGES.
*
* - Lastly, bi_vcnt should not be looked at or relied upon by code
* that does not own the bio - reason being drivers don't use it for
* iterating over the biovec anymore, so expecting it to be kept up
* to date (i.e. for clones that share the parent biovec) is just
* asking for trouble and would force extra work on
* __bio_clone_fast() anyways.
*/
bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
if (!bio)
return NULL;
bio->bi_disk = bio_src->bi_disk;
bio->bi_opf = bio_src->bi_opf;
bio->bi_write_hint = bio_src->bi_write_hint;
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_ZEROES:
break;
case REQ_OP_WRITE_SAME:
bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
break;
default:
bio_for_each_segment(bv, bio_src, iter)
bio->bi_io_vec[bio->bi_vcnt++] = bv;
break;
}
if (bio_integrity(bio_src)) {
int ret;
ret = bio_integrity_clone(bio, bio_src, gfp_mask);
if (ret < 0) {
bio_put(bio);
return NULL;
}
}
bio_clone_blkcg_association(bio, bio_src);
return bio;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kent Overstreet | 104 | 48.37% | 3 | 16.67% |
Martin K. Petersen | 31 | 14.42% | 1 | 5.56% |
Linus Torvalds | 28 | 13.02% | 2 | 11.11% |
Adrian Hunter | 15 | 6.98% | 1 | 5.56% |
Jens Axboe | 10 | 4.65% | 2 | 11.11% |
Paolo Valente | 7 | 3.26% | 1 | 5.56% |
Li Zefan | 6 | 2.79% | 1 | 5.56% |
Shaohua Li | 3 | 1.40% | 1 | 5.56% |
Michael Christie | 3 | 1.40% | 1 | 5.56% |
Chaitanya Kulkarni | 3 | 1.40% | 1 | 5.56% |
Christoph Hellwig | 2 | 0.93% | 1 | 5.56% |
Jun'ichi Nomura | 1 | 0.47% | 1 | 5.56% |
Al Viro | 1 | 0.47% | 1 | 5.56% |
Peter Osterlund | 1 | 0.47% | 1 | 5.56% |
Total | 215 | 100.00% | 18 | 100.00% |
EXPORT_SYMBOL(bio_clone_bioset);
/**
* bio_add_pc_page - attempt to add page to bio
* @q: the target queue
* @bio: destination bio
* @page: page to add
* @len: vec entry length
* @offset: vec entry offset
*
* Attempt to add a page to the bio_vec maplist. This can fail for a
* number of reasons, such as the bio being full or target block device
limitations. The target block device must allow bios up to PAGE_SIZE,
* so it is always possible to add a single page to an empty bio.
*
* This should only be used by REQ_PC bios.
*/
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
*page, unsigned int len, unsigned int offset)
{
int retried_segments = 0;
struct bio_vec *bvec;
/*
* cloned bio must not modify vec list
*/
if (unlikely(bio_flagged(bio, BIO_CLONED)))
return 0;
if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
return 0;
/*
* For filesystems with a blocksize smaller than the pagesize
* we will often be called with the same page as last time and
* a consecutive offset. Optimize this special case.
*/
if (bio->bi_vcnt > 0) {
struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
if (page == prev->bv_page &&
offset == prev->bv_offset + prev->bv_len) {
prev->bv_len += len;
bio->bi_iter.bi_size += len;
goto done;
}
/*
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
*/
if (bvec_gap_to_prev(q, prev, offset))
return 0;
}
if (bio_full(bio))
return 0;
/*
* setup the new entry, we might clear it again later if we
* cannot add the page
*/
bvec = &bio->bi_io_vec[bio->bi_vcnt];
bvec->bv_page = page;
bvec->bv_len = len;
bvec->bv_offset = offset;
bio->bi_vcnt++;
bio->bi_phys_segments++;
bio->bi_iter.bi_size += len;
/*
* Perform a recount if the number of segments is greater
* than queue_max_segments(q).
*/
while (bio->bi_phys_segments > queue_max_segments(q)) {
if (retried_segments)
goto failed;
retried_segments = 1;
blk_recount_segments(q, bio);
}
/* If we may be able to merge these biovecs, force a recount */
if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
bio_clear_flag(bio, BIO_SEG_VALID);
done:
return len;
failed:
bvec->bv_page = NULL;
bvec->bv_len = 0;
bvec->bv_offset = 0;
bio->bi_vcnt--;
bio->bi_iter.bi_size -= len;
blk_recount_segments(q, bio);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 210 | 64.81% | 7 | 50.00% |
Maurizio Lombardi | 89 | 27.47% | 1 | 7.14% |
Kent Overstreet | 9 | 2.78% | 2 | 14.29% |
Mike Christie | 8 | 2.47% | 1 | 7.14% |
Christoph Hellwig | 3 | 0.93% | 1 | 7.14% |
Patrick Mansfield | 3 | 0.93% | 1 | 7.14% |
Keith Busch | 2 | 0.62% | 1 | 7.14% |
Total | 324 | 100.00% | 14 | 100.00% |
EXPORT_SYMBOL(bio_add_pc_page);
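A hedged sketch of honoring the "attempt" contract above: the return value is the length actually added, so a builder must check it for every page (the queue, page array, and count are the caller's):

static struct bio *demo_map_pages(struct request_queue *q,
				  struct page **pages, unsigned int nr)
{
	struct bio *bio = bio_kmalloc(GFP_KERNEL, nr);
	unsigned int i;

	if (!bio)
		return NULL;
	for (i = 0; i < nr; i++) {
		if (bio_add_pc_page(q, bio, pages[i], PAGE_SIZE, 0)
		    < PAGE_SIZE) {
			bio_put(bio);	/* bio full or queue limit hit */
			return NULL;
		}
	}
	return bio;
}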