cregit-Linux how code gets into the kernel

Release 4.11 block/blk-merge.c

/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"


static struct bio *blk_bio_discard_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *nsegs)
{
        unsigned int max_discard_sectors, granularity;
        int alignment;
        sector_t tmp;
        unsigned split_sectors;

        *nsegs = 1;

        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);

        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
        max_discard_sectors -= max_discard_sectors % granularity;

        if (unlikely(!max_discard_sectors)) {
                /* XXX: warn */
                return NULL;
        }

        if (bio_sectors(bio) <= max_discard_sectors)
                return NULL;

        split_sectors = max_discard_sectors;

        /*
         * If the next starting sector would be misaligned, stop the discard at
         * the previous aligned sector.
         */
        alignment = (q->limits.discard_alignment >> 9) % granularity;

        tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
        tmp = sector_div(tmp, granularity);

        if (split_sectors > tmp)
                split_sectors -= tmp;

        return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Kent Overstreet      163     94.77%   1        50.00%
Ming Lei             9       5.23%    1        50.00%
Total                172     100.00%  2        100.00%
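The split point computed above is rounded down so that the remainder of the discard starts on a discard_granularity boundary. Below is a minimal standalone sketch of the same arithmetic, using made-up queue limits (1 MiB granularity, zero discard_alignment, a 4096-sector cap) and plain modulo in place of sector_div():

/*
 * Illustrative values only: granularity of 2048 sectors (1 MiB),
 * max_discard_sectors already rounded down to 4096, alignment 0,
 * and a bio starting at sector 1000.
 */
#include <stdio.h>

int main(void)
{
        unsigned granularity = 2048;            /* sectors per discard granule */
        unsigned max_discard_sectors = 4096;    /* rounded-down queue limit */
        int alignment = 0;                      /* (discard_alignment >> 9) % granularity */
        unsigned long long bi_sector = 1000;    /* start of the bio */
        unsigned split_sectors = max_discard_sectors;

        /* same computation as blk_bio_discard_split(), minus sector_div() */
        unsigned long long tmp = bi_sector + split_sectors - alignment;
        unsigned rem = tmp % granularity;       /* sector_div() returns the remainder */

        if (split_sectors > rem)
                split_sectors -= rem;

        /* 1000 + 4096 = 5096, 5096 % 2048 = 1000, so split at 3096 sectors;
         * the remainder then starts at sector 4096, a granularity boundary */
        printf("split_sectors = %u\n", split_sectors);
        return 0;
}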


static struct bio *blk_bio_write_same_split(struct request_queue *q,
                                            struct bio *bio,
                                            struct bio_set *bs,
                                            unsigned *nsegs)
{
        *nsegs = 1;

        if (!q->limits.max_write_same_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Kent Overstreet      67      88.16%   1        50.00%
Ming Lei             9       11.84%   1        50.00%
Total                76      100.00%  2        100.00%


static inline unsigned get_max_io_size(struct request_queue *q,
                                       struct bio *bio)
{
        unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
        unsigned mask = queue_logical_block_size(q) - 1;

        /* aligned to logical block size */
        sectors &= ~(mask >> 9);

        return sectors;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Lei Ming             54      100.00%  1        100.00%
Total                54      100.00%  1        100.00%
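get_max_io_size() trims the per-offset limit so a later split never ends in the middle of a logical block. A small sketch of that mask arithmetic, assuming a 4096-byte logical block size (the count is then rounded down to a multiple of 8 sectors); the numbers are examples, not real queue limits:

#include <stdio.h>

int main(void)
{
        unsigned logical_block_size = 4096;     /* assumed queue limit */
        unsigned mask = logical_block_size - 1; /* 4095 */
        unsigned sectors = 1027;                /* whatever blk_max_size_offset() returned */

        sectors &= ~(mask >> 9);                /* mask >> 9 == 7, so 1027 & ~7 == 1024 */

        printf("max io size = %u sectors\n", sectors);
        return 0;
}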


static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *segs)
{
        struct bio_vec bv, bvprv, *bvprvp = NULL;
        struct bvec_iter iter;
        unsigned seg_size = 0, nsegs = 0, sectors = 0;
        unsigned front_seg_size = bio->bi_seg_front_size;
        bool do_split = true;
        struct bio *new = NULL;
        const unsigned max_sectors = get_max_io_size(q, bio);
        unsigned bvecs = 0;

        bio_for_each_segment(bv, bio, iter) {
                /*
                 * With arbitrary bio size, the incoming bio may be very
                 * big. We have to split the bio into small bios so that
                 * each holds at most BIO_MAX_PAGES bvecs because
                 * bio_clone() can fail to allocate big bvecs.
                 *
                 * It should have been better to apply the limit per
                 * request queue in which bio_clone() is involved,
                 * instead of globally. The biggest blocker is the
                 * bio_clone() in bio bounce.
                 *
                 * If bio is splitted by this reason, we should have
                 * allowed to continue bios merging, but don't do
                 * that now for making the change simple.
                 *
                 * TODO: deal with bio bounce's bio_clone() gracefully
                 * and convert the global limit into per-queue limit.
                 */
                if (bvecs++ >= BIO_MAX_PAGES)
                        goto split;

                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
                 */
                if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
                        goto split;

                if (sectors + (bv.bv_len >> 9) > max_sectors) {
                        /*
                         * Consider this a new segment if we're splitting in
                         * the middle of this vector.
                         */
                        if (nsegs < queue_max_segments(q) &&
                            sectors < max_sectors) {
                                nsegs++;
                                sectors = max_sectors;
                        }
                        if (sectors)
                                goto split;
                        /* Make this single bvec as the 1st segment */
                }

                if (bvprvp && blk_queue_cluster(q)) {
                        if (seg_size + bv.bv_len > queue_max_segment_size(q))
                                goto new_segment;
                        if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
                                goto new_segment;
                        if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
                                goto new_segment;

                        seg_size += bv.bv_len;
                        bvprv = bv;
                        bvprvp = &bvprv;
                        sectors += bv.bv_len >> 9;

                        if (nsegs == 1 && seg_size > front_seg_size)
                                front_seg_size = seg_size;
                        continue;
                }
new_segment:
                if (nsegs == queue_max_segments(q))
                        goto split;

                nsegs++;
                bvprv = bv;
                bvprvp = &bvprv;
                seg_size = bv.bv_len;
                sectors += bv.bv_len >> 9;

                if (nsegs == 1 && seg_size > front_seg_size)
                        front_seg_size = seg_size;
        }

        do_split = false;
split:
        *segs = nsegs;

        if (do_split) {
                new = bio_split(bio, sectors, GFP_NOIO, bs);
                if (new)
                        bio = new;
        }

        bio->bi_seg_front_size = front_seg_size;
        if (seg_size > bio->bi_seg_back_size)
                bio->bi_seg_back_size = seg_size;

        return do_split ? new : NULL;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Kent Overstreet      177     45.27%   2        18.18%
Ming Lei             145     37.08%   6        54.55%
Keith Busch          38      9.72%    1        9.09%
Lei Ming             19      4.86%    1        9.09%
Jens Axboe           12      3.07%    1        9.09%
Total                391     100.00%  11       100.00%


void blk_queue_split(struct request_queue *q, struct bio **bio,
                     struct bio_set *bs)
{
        struct bio *split, *res;
        unsigned nsegs;

        switch (bio_op(*bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
                split = blk_bio_discard_split(q, *bio, bs, &nsegs);
                break;
        case REQ_OP_WRITE_ZEROES:
                split = NULL;
                nsegs = (*bio)->bi_phys_segments;
                break;
        case REQ_OP_WRITE_SAME:
                split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
                break;
        default:
                split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
                break;
        }

        /* physical segments can be figured out during splitting */
        res = split ? split : *bio;
        res->bi_phys_segments = nsegs;
        bio_set_flag(res, BIO_SEG_VALID);

        if (split) {
                /* there isn't chance to merge the splitted bio */
                split->bi_opf |= REQ_NOMERGE;

                bio_chain(split, *bio);
                trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
                generic_make_request(*bio);
                *bio = split;
        }
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Kent Overstreet      93      48.95%   1        12.50%
Ming Lei             44      23.16%   2        25.00%
Chaitanya Kulkarni   17      8.95%    1        12.50%
Mike Krinkin         16      8.42%    1        12.50%
Adrian Hunter        15      7.89%    1        12.50%
Michael Christie     4       2.11%    1        12.50%
Jens Axboe           1       0.53%    1        12.50%
Total                190     100.00%  8        100.00%

EXPORT_SYMBOL(blk_queue_split);
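blk_queue_split() is meant to be called at the top of a bio-based driver's make_request function, before the driver looks at the bio at all; if a split happens, the front piece comes back through *bio and the remainder is resubmitted via generic_make_request(). A hedged sketch of that calling convention follows, with a hypothetical driver: mydrv_make_request() and mydrv_handle_bio() are illustrative names, while q->bio_split is the per-queue bio_set set up by the block core.

/*
 * Sketch only: how a bio-based driver of this era would typically
 * invoke blk_queue_split() from its make_request_fn.
 */
static blk_qc_t mydrv_make_request(struct request_queue *q, struct bio *bio)
{
        /* may replace *bio with a front split and requeue the remainder */
        blk_queue_split(q, &bio, q->bio_split);

        /* from here on, bio respects the queue's segment and size limits */
        mydrv_handle_bio(q->queuedata, bio);    /* hypothetical helper */

        return BLK_QC_T_NONE;
}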
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio,
                                             bool no_sg_merge)
{
        struct bio_vec bv, bvprv = { NULL };
        int cluster, prev = 0;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
        struct bvec_iter iter;

        if (!bio)
                return 0;

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
        case REQ_OP_WRITE_ZEROES:
                return 0;
        case REQ_OP_WRITE_SAME:
                return 1;
        }

        fbio = bio;
        cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, iter) {
                        /*
                         * If SG merging is disabled, each bio vector is
                         * a segment
                         */
                        if (no_sg_merge)
                                goto new_segment;

                        if (prev && cluster) {
                                if (seg_size + bv.bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
                                        goto new_segment;
                                if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
                                        goto new_segment;

                                seg_size += bv.bv_len;
                                bvprv = bv;
                                continue;
                        }
new_segment:
                        if (nr_phys_segs == 1 && seg_size >
                            fbio->bi_seg_front_size)
                                fbio->bi_seg_front_size = seg_size;

                        nr_phys_segs++;
                        bvprv = bv;
                        prev = 1;
                        seg_size = bv.bv_len;
                }
                bbio = bio;
        }

        if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
                fbio->bi_seg_front_size = seg_size;
        if (seg_size > bbio->bi_seg_back_size)
                bbio->bi_seg_back_size = seg_size;

        return nr_phys_segs;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Jens Axboe           177     64.13%   4        25.00%
FUJITA Tomonori      41      14.86%   1        6.25%
Kent Overstreet      27      9.78%    3        18.75%
Chaitanya Kulkarni   12      4.35%    1        6.25%
Christoph Hellwig    5       1.81%    1        6.25%
Martin K. Petersen   4       1.45%    2        12.50%
Michael Christie     4       1.45%    1        6.25%
Ming Lei             3       1.09%    1        6.25%
Nicholas Piggin      2       0.72%    1        6.25%
Adrian Hunter        1       0.36%    1        6.25%
Total                276     100.00%  16       100.00%


void blk_recalc_rq_segments(struct request *rq)
{
        bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
                        &rq->q->queue_flags);

        rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
                        no_sg_merge);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Jens Axboe           25      56.82%   2        66.67%
Ming Lei             19      43.18%   1        33.33%
Total                44      100.00%  3        100.00%


void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        unsigned short seg_cnt;

        /* estimate segment number by bi_vcnt for non-cloned bio */
        if (bio_flagged(bio, BIO_CLONED))
                seg_cnt = bio_segments(bio);
        else
                seg_cnt = bio->bi_vcnt;

        if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
                        (seg_cnt < queue_max_segments(q)))
                bio->bi_phys_segments = seg_cnt;
        else {
                struct bio *nxt = bio->bi_next;

                bio->bi_next = NULL;
                bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
                bio->bi_next = nxt;
        }

        bio_set_flag(bio, BIO_SEG_VALID);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Jens Axboe           72      63.16%   4        57.14%
Lei Ming             41      35.96%   2        28.57%
Ming Lei             1       0.88%    1        14.29%
Total                114     100.00%  7        100.00%

EXPORT_SYMBOL(blk_recount_segments);
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
{
        struct bio_vec end_bv = { NULL }, nxt_bv;

        if (!blk_queue_cluster(q))
                return 0;

        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
            queue_max_segment_size(q))
                return 0;

        if (!bio_has_data(bio))
                return 1;

        bio_get_last_bvec(bio, &end_bv);
        bio_get_first_bvec(nxt, &nxt_bv);

        if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
                return 0;

        /*
         * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
        if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
                return 1;

        return 0;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Jens Axboe           54      44.26%   1        11.11%
Kent Overstreet      26      21.31%   2        22.22%
David Woodhouse      25      20.49%   1        11.11%
Ming Lei             9       7.38%    1        11.11%
Martin K. Petersen   4       3.28%    2        22.22%
Nicholas Piggin      2       1.64%    1        11.11%
FUJITA Tomonori      2       1.64%    1        11.11%
Total                122     100.00%  9        100.00%


static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
                     struct scatterlist *sglist, struct bio_vec *bvprv,
                     struct scatterlist **sg, int *nsegs, int *cluster)
{
        int nbytes = bvec->bv_len;

        if (*sg && *cluster) {
                if ((*sg)->length + nbytes > queue_max_segment_size(q))
                        goto new_segment;

                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
                        goto new_segment;
                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                        goto new_segment;

                (*sg)->length += nbytes;
        } else {
new_segment:
                if (!*sg)
                        *sg = sglist;
                else {
                        /*
                         * If the driver previously mapped a shorter
                         * list, we could see a termination bit
                         * prematurely unless it fully inits the sg
                         * table on each mapping. We KNOW that there
                         * must be more entries here or the driver
                         * would be buggy, so force clear the
                         * termination bit to avoid doing a full
                         * sg_init_table() in drivers for each command.
                         */
                        sg_unmark_end(*sg);
                        *sg = sg_next(*sg);
                }

                sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                (*nsegs)++;
        }
        *bvprv = *bvec;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Jens Axboe           130     73.45%   1        20.00%
Asias He             38      21.47%   1        20.00%
Paolo Bonzini        3       1.69%    1        20.00%
Kent Overstreet      3       1.69%    1        20.00%
Martin K. Petersen   3       1.69%    1        20.00%
Total                177     100.00%  5        100.00%


static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
                struct scatterlist *sglist, struct scatterlist **sg)
{
        *sg = sglist;
        sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
        return 1;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Christoph Hellwig    53      100.00%  1        100.00%
Total                53      100.00%  1        100.00%


static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                             struct scatterlist *sglist,
                             struct scatterlist **sg)
{
        struct bio_vec bvec, bvprv = { NULL };
        struct bvec_iter iter;
        int cluster = blk_queue_cluster(q), nsegs = 0;

        for_each_bio(bio)
                bio_for_each_segment(bvec, bio, iter)
                        __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
                                             &nsegs, &cluster);

        return nsegs;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Asias He             54      60.67%   1        16.67%
Kent Overstreet      26      29.21%   3        50.00%
Christoph Hellwig    5       5.62%    1        16.67%
Jens Axboe           4       4.49%    1        16.67%
Total                89      100.00%  6        100.00%

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
{
        struct scatterlist *sg = NULL;
        int nsegs = 0;

        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
        else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
                nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
        else if (rq->bio)
                nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

        if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
            (blk_rq_bytes(rq) & q->dma_pad_mask)) {
                unsigned int pad_len =
                        (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

                sg->length += pad_len;
                rq->extra_len += pad_len;
        }

        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                if (op_is_write(req_op(rq)))
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

                sg_unmark_end(sg);
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                            q->dma_drain_size,
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
                rq->extra_len += q->dma_drain_size;
        }

        if (sg)
                sg_mark_end(sg);

        /*
         * Something must have been wrong if the figured number of
         * segment is bigger than number of req's physical segments
         */
        WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

        return nsegs;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Jens Axboe           65      22.89%   1        7.69%
Christoph Hellwig    65      22.89%   2        15.38%
Kent Overstreet      53      18.66%   1        7.69%
FUJITA Tomonori      47      16.55%   2        15.38%
Tejun Heo            37      13.03%   4        30.77%
Ming Lei             8       2.82%    1        7.69%
Michael Christie     6       2.11%    1        7.69%
Dan J Williams       3       1.06%    1        7.69%
Total                284     100.00%  13       100.00%

EXPORT_SYMBOL(blk_rq_map_sg);
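A typical caller sizes the scatterlist from the request's physical segment count, lets blk_rq_map_sg() fill it, and programs the returned number of entries into its DMA engine. Below is a hedged sketch of that convention; the mydrv_* names are hypothetical, and the caller is assumed to have allocated the table elsewhere.

/*
 * Sketch only: the usual blk_rq_map_sg() calling convention in a
 * request-based driver's prep path.
 */
static int mydrv_prep_rq(struct request_queue *q, struct request *rq,
                         struct scatterlist *sgl)
{
        int nents;

        /* caller guarantees sgl has at least blk_rq_nr_phys_segments(rq) slots */
        sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
        nents = blk_rq_map_sg(q, rq, sgl);

        /* nents <= blk_rq_nr_phys_segments(rq); hand the entries to the DMA engine */
        return mydrv_setup_dma(rq, sgl, nents); /* hypothetical helper */
}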
static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
{
        int nr_phys_segs = bio_phys_segments(q, bio);

        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
                goto no_merge;

        if (blk_integrity_merge_bio(q, req, bio) == false)
                goto no_merge;

        /*
         * This will form the start of a new hw segment.  Bump both
         * counters.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;

no_merge:
        req_set_nomerge(q, req);
        return 0;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Jens Axboe           45      52.33%   1        14.29%
Martin K. Petersen   34      39.53%   4        57.14%
Ritesh Harjani       5       5.81%    1        14.29%
Mikulas Patocka      2       2.33%    1        14.29%
Total                86      100.00%  7        100.00%


int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
{
        if (req_gap_back_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_back_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
                req_set_nomerge(q, req);
                return 0;
        }
        if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                blk_recount_segments(q, req->biotail);
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);

        return ll_new_hw_segment(q, req, bio);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Jens Axboe           97      74.05%   2        28.57%
Sagi Grimberg        17      12.98%   1        14.29%
Damien Le Moal       5       3.82%    1        14.29%
Ritesh Harjani       5       3.82%    1        14.29%
Martin K. Petersen   4       3.05%    1        14.29%
Tejun Heo            3       2.29%    1        14.29%
Total                131     100.00%  7        100.00%


int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
{
        if (req_gap_front_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_front_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
                req_set_nomerge(q, req);
                return 0;
        }
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);

        return ll_new_hw_segment(q, req, bio);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Jens Axboe           97      73.48%   2        28.57%
Sagi Grimberg        17      12.88%   1        14.29%
Damien Le Moal       6       4.55%    1        14.29%
Ritesh Harjani       5       3.79%    1        14.29%
Martin K. Petersen   4       3.03%    1        14.29%
Tejun Heo            3       2.27%    1        14.29%
Total                132     100.00%  7        100.00%

/*
 * blk-mq uses req->special to carry normal driver per-request payload, it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
        struct request_queue *q = req->q;

        return !q->mq_ops && req->special;
}

Contributors

PersonTokensPropCommitsCommitProp
Jens Axboe30100.00%1100.00%
Total30100.00%1100.00%


static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;
        unsigned int seg_size =
                req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

        /*
         * First check if the either of the requests are re-queued
         * requests. Can't merge them if they are.
         */
        if (req_no_special_merge(req) || req_no_special_merge(next))
                return 0;

        if (req_gap_back_merge(req, next->bio))
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
                if (req->nr_phys_segments == 1)
                        req->bio->bi_seg_front_size = seg_size;
                if (next->nr_phys_segments == 1)
                        next->biotail->bi_seg_back_size = seg_size;
                total_phys_segments--;
        }

        if (total_phys_segments > queue_max_segments(q))
                return 0;

        if (blk_integrity_merge_rq(q, req, next) == false)
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Jens Axboe           101     51.01%   3        23.08%
FUJITA Tomonori      50      25.25%   1        7.69%
Martin K. Petersen   23      11.62%   5        38.46%
Keith Busch          13      6.57%    2        15.38%
Tejun Heo            6       3.03%    1        7.69%
Damien Le Moal       5       2.53%    1        7.69%
Total                198     100.00%  13       100.00%

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->rq_flags & RQF_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios.  It will just track those of the first one.
         * Distributes the attributs to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
                             (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
                bio->bi_opf |= ff;
        }
        rq->rq_flags |= RQF_MIXED_MERGE;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Tejun Heo            79      91.86%   1        33.33%
Christoph Hellwig    4       4.65%    1        33.33%
Jens Axboe           3       3.49%    1        33.33%
Total                86      100.00%  3        100.00%


static void blk_account_io_merge(struct request *req)
{
        if (blk_do_io_stat(req)) {
                struct hd_struct *part;
                int cpu;

                cpu = part_stat_lock();
                part = req->part;

                part_round_stats(cpu, part);
                part_dec_in_flight(part, rq_data_dir(req));

                hd_struct_put(part);
                part_stat_unlock();
        }
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Jerome Marchand      58      90.62%   2        50.00%
Nikanth Karthikesan  5       7.81%    1        25.00%
Jens Axboe           1       1.56%    1        25.00%
Total                64      100.00%  4        100.00%

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
                                     struct request *req, struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return NULL;

        if (req_op(req) != req_op(next))
                return NULL;

        /*
         * not contiguous
         */
        if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
                return NULL;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk
            || req_no_special_merge(next))
                return NULL;

        if (req_op(req) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return NULL;

        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
         * will have updated segment counts, update sector
         * counts here.
         */
        if (!ll_merge_requests_fn(q, req, next))
                return NULL;

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding.  This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
            (req->