cregit-Linux how code gets into the kernel

Release 4.13 block/blk-lib.c

Directory: block
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"


/*
 * Allocate a new bio with room for @nr_pages vecs.  If a previous bio is
 * passed in, chain it to the new one and submit it, so the caller can keep
 * building a chain of in-flight bios and only wait on the last one.
 */
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
	struct bio *next = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, next);
		submit_bio(bio);
	}

	return next;
}

Contributors

Person               Tokens  Prop     Commits  Commit Prop
Christoph Hellwig    30      56.60%   2        50.00%
Dmitriy Monakhov     19      35.85%   1        25.00%
Lukas Czerner        4       7.55%    1        25.00%
Total                53      100.00%  4        100.00%


/**
 * __blkdev_issue_discard - queue a discard, chaining bios onto an anchor
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 * @biop:	in/out anchor bio: already-built bios are chained and submitted,
 *		and the last (not yet submitted) bio is returned through @biop
 *		for the caller to wait on
 *
 * Returns 0 on success, -ENXIO if the device has no queue, -EOPNOTSUPP if
 * the queue lacks (secure-erase) discard support, or -EINVAL if the range
 * is not aligned to the logical block size.
 */
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	unsigned int op;
	int alignment;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	/* Pick the discard opcode and verify the queue supports it */
	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* Both start and length must be logical-block aligned (512B units) */
	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/**
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		/* NB: sector_div() divides its first argument in place and
		 * returns the remainder */
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			/* Round end_sect down to the previous boundary that is
			 * congruent to @alignment modulo @granularity */
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		/* Discard bios carry no pages; only the range matters */
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Dmitriy Monakhov9128.80%16.25%
Ming Lin8326.27%16.25%
Christoph Hellwig6520.57%531.25%
Darrick J. Wong309.49%16.25%
Paolo Bonzini268.23%16.25%
Jens Axboe92.85%212.50%
Michael Christie61.90%212.50%
Kent Overstreet41.27%16.25%
Shaohua Li10.32%16.25%
Lukas Czerner10.32%16.25%
Total316100.00%16100.00%

EXPORT_SYMBOL(__blkdev_issue_discard); /** * blkdev_issue_discard - queue a discard * @bdev: blockdev to issue discard for * @sector: start sector * @nr_sects: number of sectors to discard * @gfp_mask: memory allocation flags (for bio_alloc) * @flags: BLKDEV_DISCARD_* flags to control behaviour * * Description: * Issue a discard request for the sectors in question. */
/*
 * Build the discard bio chain under a plug, then wait for the final bio.
 * A device that raced away its discard support (-EOPNOTSUPP at completion
 * time) is treated as success, matching the historical behaviour.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct blk_plug plug;
	struct bio *bio = NULL;
	int err;

	blk_start_plug(&plug);
	err = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (err == 0 && bio != NULL) {
		err = submit_bio_wait(bio);
		if (err == -EOPNOTSUPP)
			err = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig7472.55%342.86%
Mike Snitzer1312.75%114.29%
Dmitriy Monakhov87.84%114.29%
Shaun Tancheff54.90%114.29%
Lukas Czerner21.96%114.29%
Total102100.00%7100.00%

EXPORT_SYMBOL(blkdev_issue_discard); /** * __blkdev_issue_write_same - generate number of bios with same page * @bdev: target blockdev * @sector: start sector * @nr_sects: number of sectors to write * @gfp_mask: memory allocation flags (for bio_alloc) * @page: page containing data to write * @biop: pointer to anchor bio * * Description: * Generate and issue number of bios(REQ_OP_WRITE_SAME) with same page. */
/*
 * Generate a chain of REQ_OP_WRITE_SAME bios that all reference @page,
 * splitting the range so no single bio's bi_size overflows.  The last bio
 * is returned through @biop for the caller to submit and wait on.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				     sector_t nr_sects, gfp_t gfp_mask,
				     struct page *page, struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	/* Start and length must be aligned to the logical block size */
	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		sector_t chunk = min_t(sector_t, nr_sects,
				       max_write_same_sectors);

		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);
		bio->bi_iter.bi_size = chunk << 9;

		nr_sects -= chunk;
		sector += chunk;
		cond_resched();
	}

	*biop = bio;
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Martin K. Petersen14862.18%112.50%
Chaitanya Kulkarni3414.29%112.50%
Darrick J. Wong3012.61%112.50%
Michael Christie93.78%225.00%
Christoph Hellwig72.94%112.50%
Kent Overstreet62.52%112.50%
Ming Lin41.68%112.50%
Total238100.00%8100.00%

/** * blkdev_issue_write_same - queue a write same operation * @bdev: target blockdev * @sector: start sector * @nr_sects: number of sectors to write * @gfp_mask: memory allocation flags (for bio_alloc) * @page: page containing data * * Description: * Issue a write same request for the sectors in question. */
/*
 * Plugged front end for __blkdev_issue_write_same(): build the chain,
 * submit and wait on the final bio, and return the first error seen.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page)
{
	struct blk_plug plug;
	struct bio *bio = NULL;
	int err;

	blk_start_plug(&plug);
	err = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (!err && bio) {
		err = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Chaitanya Kulkarni7176.34%125.00%
Martin K. Petersen88.60%125.00%
Christoph Hellwig77.53%125.00%
Shaun Tancheff77.53%125.00%
Total93100.00%4100.00%

EXPORT_SYMBOL(blkdev_issue_write_same);
/*
 * Generate a chain of REQ_OP_WRITE_ZEROES bios covering the range, honouring
 * the device's advertised write-zeroes limit.  BLKDEV_ZERO_NOUNMAP requests
 * that the device not deprovision the blocks (REQ_NOUNMAP).
 */
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_zeroes_sectors;
	struct bio *bio = *biop;

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);
	if (!max_write_zeroes_sectors)
		return -EOPNOTSUPP;

	while (nr_sects) {
		sector_t chunk = min_t(sector_t, nr_sects,
				       max_write_zeroes_sectors);

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;
		bio->bi_iter.bi_size = chunk << 9;

		nr_sects -= chunk;
		sector += chunk;
		cond_resched();
	}

	*biop = bio;
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Chaitanya Kulkarni16390.06%150.00%
Christoph Hellwig189.94%150.00%
Total181100.00%2100.00%

/* * Convert a number of 512B sectors to a number of pages. * The result is limited to a number of pages that can fit into a BIO. * Also make sure that the result is always at least 1 (page) for the cases * where nr_sects is lower than the number of sectors in a page. */
/*
 * Convert a count of 512B sectors into the number of pages needed to hold
 * them, rounded up, and capped at what a single bio can carry.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	/* Round the byte count up to whole pages before converting */
	sector_t pages = ((nr_sects << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;

	return min(pages, (sector_t)BIO_MAX_PAGES);
}

Contributors

PersonTokensPropCommitsCommitProp
Damien Le Moal36100.00%1100.00%
Total36100.00%1100.00%

/** * __blkdev_issue_zeroout - generate number of zero filed write bios * @bdev: blockdev to issue * @sector: start sector * @nr_sects: number of sectors to write * @gfp_mask: memory allocation flags (for bio_alloc) * @biop: pointer to anchor bio * @flags: controls detailed behavior * * Description: * Zero-fill a block range, either using hardware offload or by explicitly * writing zeroes to the device. * * Note that this function may fail with -EOPNOTSUPP if the driver signals * zeroing offload support, but the device fails to process the command (for * some devices there is no non-destructive way to verify whether this * operation is actually supported). In this case the caller should call * retry the call to blkdev_issue_zeroout() and the fallback path will be used. * * If a device is using logical block provisioning, the underlying space will * not be released if %flags contains BLKDEV_ZERO_NOUNMAP. * * If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return * -EOPNOTSUPP if no explicit hardware offload for zeroing is provided. */
/*
 * Zero a range: try the hardware write-zeroes offload first, and unless the
 * caller forbade it (BLKDEV_ZERO_NOFALLBACK), fall back to explicit writes
 * of ZERO_PAGE.  Bios are chained onto *biop; the last one is handed back
 * for the caller to wait on.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	int bi_size = 0;
	struct bio *bio = *biop;
	unsigned int sz;
	sector_t bs_mask;

	/* Range must be aligned to the logical block size (512B units) */
	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Fast path: hardware offload, if the device advertises it */
	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		goto out;

	/* Fallback: explicit writes of the shared zero page */
	ret = 0;
	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
				gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		/* Pack ZERO_PAGE into the bio until it is full */
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			/* bio_add_page() returns the number of bytes added */
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			/* Partial add means this bio is full; start a new one */
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
out:
	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Dmitriy Monakhov10545.65%16.25%
Chaitanya Kulkarni5523.91%212.50%
Darrick J. Wong3013.04%16.25%
Christoph Hellwig177.39%531.25%
Michael Christie93.91%212.50%
Shaun Tancheff52.17%16.25%
Damien Le Moal31.30%16.25%
Jens Axboe31.30%16.25%
Kent Overstreet20.87%16.25%
Martin K. Petersen10.43%16.25%
Total230100.00%16100.00%

EXPORT_SYMBOL(__blkdev_issue_zeroout); /** * blkdev_issue_zeroout - zero-fill a block range * @bdev: blockdev to write * @sector: start sector * @nr_sects: number of sectors to write * @gfp_mask: memory allocation flags (for bio_alloc) * @flags: controls detailed behavior * * Description: * Zero-fill a block range, either using hardware offload or by explicitly * writing zeroes to the device. See __blkdev_issue_zeroout() for the * valid values for %flags. */
/*
 * Plugged front end for __blkdev_issue_zeroout(): build the bio chain,
 * then submit and wait on the final bio if one was produced.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	struct blk_plug plug;
	struct bio *bio = NULL;
	int err;

	blk_start_plug(&plug);
	err = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, &bio,
			flags);
	if (err == 0 && bio != NULL) {
		err = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Chaitanya Kulkarni4953.85%120.00%
Martin K. Petersen3942.86%360.00%
Christoph Hellwig33.30%120.00%
Total91100.00%5100.00%

EXPORT_SYMBOL(blkdev_issue_zeroout);

Overall Contributors

PersonTokensPropCommitsCommitProp
Chaitanya Kulkarni37927.27%25.71%
Dmitriy Monakhov25218.13%25.71%
Christoph Hellwig22816.40%1028.57%
Martin K. Petersen20114.46%411.43%
Darrick J. Wong906.47%12.86%
Ming Lin876.26%25.71%
Damien Le Moal402.88%12.86%
Paolo Bonzini261.87%12.86%
Michael Christie241.73%38.57%
Shaun Tancheff171.22%12.86%
Mike Snitzer130.94%12.86%
Kent Overstreet120.86%12.86%
Jens Axboe120.86%38.57%
Lukas Czerner70.50%12.86%
Shaohua Li10.07%12.86%
Eric Biggers10.07%12.86%
Total1390100.00%35100.00%
Directory: block
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.