Contributors: 5

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
| --- | ---: | ---: | ---: | ---: |
| Christoph Hellwig | 263 | 98.13% | 4 | 50.00% |
| Bart Van Assche | 2 | 0.75% | 1 | 12.50% |
| David Chinner | 1 | 0.37% | 1 | 12.50% |
| Matthew Wilcox | 1 | 0.37% | 1 | 12.50% |
| Russell Cattelan | 1 | 0.37% | 1 | 12.50% |
| **Total** | 268 | 100.00% | 8 | 100.00% |
```c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Christoph Hellwig.
 */
#include "xfs.h"

/* Number of bio_vecs needed to map count bytes, capped at BIO_MAX_VECS. */
static inline unsigned int bio_max_vecs(unsigned int count)
{
	return bio_max_segs(howmany(count, PAGE_SIZE));
}

int
xfs_rw_bdev(
	struct block_device	*bdev,
	sector_t		sector,
	unsigned int		count,
	char			*data,
	enum req_op		op)
{
	unsigned int		is_vmalloc = is_vmalloc_addr(data);
	unsigned int		left = count;
	char			*p = data;
	int			error;
	struct bio		*bio;

	/*
	 * vmalloc'ed buffers can alias in the data cache; flush the
	 * kernel vmap range before the device reads it for a write.
	 */
	if (is_vmalloc && op == REQ_OP_WRITE)
		flush_kernel_vmap_range(data, count);

	bio = bio_alloc(bdev, bio_max_vecs(left), op | REQ_META | REQ_SYNC,
			GFP_KERNEL);
	bio->bi_iter.bi_sector = sector;

	/*
	 * Walk the buffer with a separate cursor so that "data" still
	 * points at the start of the buffer for the invalidation below.
	 */
	do {
		struct page	*page = kmem_to_page(p);
		unsigned int	off = offset_in_page(p);
		unsigned int	len = min_t(unsigned, left, PAGE_SIZE - off);

		/*
		 * If the current bio is full, chain a fresh bio to it,
		 * submit the full one, and keep adding pages to the new
		 * bio.  submit_bio_wait() below waits for the whole
		 * chain to complete.
		 */
		while (bio_add_page(bio, page, len, off) != len) {
			struct bio	*prev = bio;

			bio = bio_alloc(prev->bi_bdev, bio_max_vecs(left),
					prev->bi_opf, GFP_KERNEL);
			bio->bi_iter.bi_sector = bio_end_sector(prev);
			bio_chain(prev, bio);
			submit_bio(prev);
		}

		p += len;
		left -= len;
	} while (left > 0);

	error = submit_bio_wait(bio);
	bio_put(bio);

	/*
	 * After a read into a vmalloc'ed buffer, invalidate the vmap
	 * range so the CPU sees the freshly transferred data rather
	 * than stale cache lines.  This must cover the start of the
	 * buffer, not the advanced cursor, hence the separate "p".
	 */
	if (is_vmalloc && op == REQ_OP_READ)
		invalidate_kernel_vmap_range(data, count);
	return error;
}
```
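
For reference, here is a minimal caller sketch. It is not part of this file; the helper name `example_read_meta` and its error handling are invented for illustration. The point it shows is that `xfs_rw_bdev()` performs synchronous, metadata-tagged I/O and does the vmap cache maintenance for vmalloc memory itself, so a caller only needs to allocate a buffer, call it, and check the result.

```c
/*
 * Hypothetical usage sketch (not from this file): synchronously read
 * "count" bytes starting at "sector" from a block device into a
 * vmalloc'ed buffer.  xfs_rw_bdev() copes with vmalloc memory on its
 * own, so no extra cache flushing is needed here.
 */
static int example_read_meta(struct block_device *bdev, sector_t sector,
		unsigned int count)
{
	char	*buf;
	int	error;

	buf = vmalloc(count);
	if (!buf)
		return -ENOMEM;

	error = xfs_rw_bdev(bdev, sector, count, buf, REQ_OP_READ);

	/* ... consume buf on success ... */
	vfree(buf);
	return error;
}
```

In the kernel tree, the XFS log-recovery path uses the function in much this way, passing byte counts converted from basic blocks.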