Release 4.10 fs/f2fs/data.c
/*
* fs/f2fs/data.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
/*
 * Tell whether writeback of @page must be guaranteed by checkpoint:
 * meta pages, node pages, directory data and cold data all qualify.
 */
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi))
		return true;
	if (inode->i_ino == F2FS_NODE_INO(sbi))
		return true;
	if (S_ISDIR(inode->i_mode))
		return true;

	return is_cold_data(page);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
chao yu | chao yu | 89 | 100.00% | 1 | 100.00% |
| Total | 89 | 100.00% | 1 | 100.00% |
/*
 * Completion handler for read bios: decrypt pages when needed, then mark
 * each page up-to-date (or in error) and unlock it so waiters can proceed.
 */
static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	/* fault injection: pretend the whole bio failed with an I/O error */
	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO))
		bio->bi_error = -EIO;
#endif

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			/*
			 * decryption is queued to a workqueue which will
			 * finish the bio (unlock pages) asynchronously
			 */
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 109 | 73.65% | 6 | 54.55% |
chao yu | chao yu | 30 | 20.27% | 3 | 27.27% |
christoph hellwig | christoph hellwig | 6 | 4.05% | 1 | 9.09% |
linus torvalds | linus torvalds | 3 | 2.03% | 1 | 9.09% |
| Total | 148 | 100.00% | 11 | 100.00% |
/*
 * Completion handler for write bios: drop the per-type writeback count
 * for each page and wake the checkpoint waiter once all checkpointed
 * data pages have hit the disk.
 */
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		/* if the page was encrypted in flight, recover the original page */
		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_error)) {
			mapping_set_error(page->mapping, -EIO);
			/* a lost write leaves the fs inconsistent: stop checkpointing */
			f2fs_stop_checkpoint(sbi, true);
		}
		dec_page_count(sbi, type);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	/* last checkpointed data page written back: release the checkpoint */
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 80 | 57.14% | 9 | 60.00% |
chao yu | chao yu | 25 | 17.86% | 1 | 6.67% |
linus torvalds | linus torvalds | 18 | 12.86% | 1 | 6.67% |
huajun li | huajun li | 7 | 5.00% | 1 | 6.67% |
michal hocko | michal hocko | 4 | 2.86% | 1 | 6.67% |
christoph hellwig | christoph hellwig | 3 | 2.14% | 1 | 6.67% |
namjae jeon | namjae jeon | 3 | 2.14% | 1 | 6.67% |
| Total | 140 | 100.00% | 15 | 100.00% |
/*
 * Map a block address to the device it lives on and, when @bio is given,
 * retarget the bio (bi_bdev and starting sector) to that device.
 */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	/* default to the main superblock device when no extra device matches */
	struct block_device *bdev = sbi->sb->s_bdev;
	int dev;

	for (dev = 0; dev < sbi->s_ndevs; dev++) {
		if (blk_addr < FDEV(dev).start_blk ||
				blk_addr > FDEV(dev).end_blk)
			continue;
		/* rebase the address into the device's own block space */
		blk_addr -= FDEV(dev).start_blk;
		bdev = FDEV(dev).bdev;
		break;
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 118 | 100.00% | 1 | 100.00% |
| Total | 118 | 100.00% | 1 | 100.00% |
/* Return the index of the device holding @blkaddr; 0 when none matches. */
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int dev = 0;

	while (dev < sbi->s_ndevs) {
		if (blkaddr >= FDEV(dev).start_blk &&
				blkaddr <= FDEV(dev).end_blk)
			return dev;
		dev++;
	}
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 57 | 100.00% | 1 | 100.00% |
| Total | 57 | 100.00% | 1 | 100.00% |
/* Does @bio already target the device that @blk_addr belongs to? */
static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *target = f2fs_target_device(sbi, blk_addr, NULL);

	return target == bio->bi_bdev;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 33 | 100.00% | 1 | 100.00% |
| Total | 33 | 100.00% | 1 | 100.00% |
/*
* Low-level block read/write IO operations.
*/
/*
 * Allocate a bio aimed at @blk_addr with room for @npages pages and
 * wire up the matching read/write completion handler.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio = f2fs_bio_alloc(npages);

	f2fs_target_device(sbi, blk_addr, bio);
	if (is_read) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		/* write completion needs the sb info for accounting */
		bio->bi_private = sbi;
	}
	return bio;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
gu zheng | gu zheng | 50 | 75.76% | 1 | 20.00% |
jaegeuk kim | jaegeuk kim | 12 | 18.18% | 3 | 60.00% |
chao yu | chao yu | 4 | 6.06% | 1 | 20.00% |
| Total | 66 | 100.00% | 5 | 100.00% |
/*
 * Hand @bio to the block layer.  On zoned block device mounts, data and
 * node writes must not be held back and reordered by the current plug,
 * so flush the plug before submitting.
 */
static inline void __submit_bio(struct f2fs_sb_info *sbi,
			struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
			current->plug && (type == DATA || type == NODE))
			blk_finish_plug(current->plug);
	}
	submit_bio(bio);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 64 | 92.75% | 3 | 60.00% |
michael christie | michael christie | 4 | 5.80% | 1 | 20.00% |
damien le moal | damien le moal | 1 | 1.45% | 1 | 20.00% |
| Total | 69 | 100.00% | 5 | 100.00% |
/*
 * Submit the bio currently being merged in @io, if any, and reset it.
 * Callers in this file hold io->io_rwsem for writing around this call.
 */
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->op))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 89 | 84.76% | 9 | 81.82% |
michael christie | michael christie | 15 | 14.29% | 1 | 9.09% |
gu zheng | gu zheng | 1 | 0.95% | 1 | 9.09% |
| Total | 105 | 100.00% | 11 | 100.00% |
/*
 * Check whether the pending bio in @io carries a page matching any of
 * @inode, @page or @ino.  Passing all of them as NULL/0 means "does the
 * bio carry anything at all".  Caller must hold io->io_rwsem.
 */
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		/* an encrypted bounce page has no mapping; look up its original */
		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
chao yu | chao yu | 136 | 98.55% | 2 | 66.67% |
jaegeuk kim | jaegeuk kim | 2 | 1.45% | 1 | 33.33% |
| Total | 138 | 100.00% | 3 | 100.00% |
/* Like __has_merged_page(), but takes io->io_rwsem for the caller. */
static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
				struct page *page, nid_t ino,
				enum page_type type)
{
	struct f2fs_bio_info *io = &sbi->write_io[PAGE_TYPE_OF_BIO(type)];
	bool found;

	down_read(&io->io_rwsem);
	found = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);

	return found;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
chao yu | chao yu | 85 | 100.00% | 2 | 100.00% |
| Total | 85 | 100.00% | 2 | 100.00% |
/*
 * Flush the pending merged bio for @type when it holds a page matching
 * @inode/@page/@ino (all NULL/0 matches any page).  Checkpoint META_FLUSH
 * requests are upgraded to preflush (+FUA unless nobarrier) writes.
 */
static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_PREFLUSH | REQ_META | REQ_PRIO;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_FUA;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 99 | 61.88% | 5 | 55.56% |
chao yu | chao yu | 38 | 23.75% | 2 | 22.22% |
christoph hellwig | christoph hellwig | 13 | 8.12% | 1 | 11.11% |
michael christie | michael christie | 10 | 6.25% | 1 | 11.11% |
| Total | 160 | 100.00% | 9 | 100.00% |
/* Unconditionally flush the pending merged bio of @type. */
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
								int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
chao yu | chao yu | 32 | 100.00% | 1 | 100.00% |
| Total | 32 | 100.00% | 1 | 100.00% |
/*
 * Flush the merged bio of @type only when it actually carries a page
 * matching @inode, @page or @ino.
 */
void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	/* peek under the read lock first to avoid taking it exclusively */
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
chao yu | chao yu | 60 | 100.00% | 1 | 100.00% |
| Total | 60 | 100.00% | 1 | 100.00% |
/* Flush all pending merged write bios: data, node and meta. */
void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
chao yu | chao yu | 37 | 100.00% | 1 | 100.00% |
| Total | 37 | 100.00% | 1 | 100.00% |
/*
* Fill the locked page with data located in the block address.
* Return unlocked page.
*/
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	/* for encrypted writes, the bounce page is the one that hits disk */
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		/* a fresh one-page bio should always accept the page */
		bio_put(bio);
		return -EFAULT;
	}
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 104 | 83.87% | 8 | 66.67% |
michael christie | michael christie | 13 | 10.48% | 1 | 8.33% |
gu zheng | gu zheng | 4 | 3.23% | 1 | 8.33% |
kirill a. shutemov | kirill a. shutemov | 2 | 1.61% | 1 | 8.33% |
chao yu | chao yu | 1 | 0.81% | 1 | 8.33% |
| Total | 124 | 100.00% | 12 | 100.00% |
/*
 * Merge @fio into the in-flight bio of its page type when the target
 * block is contiguous, the op/flags match and it is on the same device;
 * otherwise submit the pending bio and start a new one.
 */
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->op);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	/* balanced by dec_page_count() in f2fs_write_end_io() */
	if (!is_read)
		inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	down_write(&io->io_rwsem);

	/* submit first if the new block cannot be merged into the current bio */
	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						BIO_MAX_PAGES, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
							PAGE_SIZE) {
		/* bio is full: submit it and retry with a fresh one */
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 194 | 68.31% | 9 | 56.25% |
chao yu | chao yu | 54 | 19.01% | 3 | 18.75% |
michael christie | michael christie | 15 | 5.28% | 1 | 6.25% |
gu zheng | gu zheng | 10 | 3.52% | 1 | 6.25% |
fan li | fan li | 9 | 3.17% | 1 | 6.25% |
kirill a. shutemov | kirill a. shutemov | 2 | 0.70% | 1 | 6.25% |
| Total | 284 | 100.00% | 16 | 100.00% |
/* Store dn->data_blkaddr into its slot in the on-disk node page. */
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addrs;

	/* Get physical address of data block */
	addrs = blkaddr_in_node(rn);
	addrs[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 38 | 77.55% | 4 | 80.00% |
chao yu | chao yu | 11 | 22.45% | 1 | 20.00% |
| Total | 49 | 100.00% | 5 | 100.00% |
/*
* Lock ordering for the change of data block address:
* ->data_page
* ->node_page
* update block addresses in the node page
*/
void set_data_blkaddr(struct dnode_of_data *dn)
{
	/* the node page must not be under writeback while its slot changes */
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
chao yu | chao yu | 27 | 65.85% | 1 | 20.00% |
jaegeuk kim | jaegeuk kim | 14 | 34.15% | 4 | 80.00% |
| Total | 41 | 100.00% | 5 | 100.00% |
/* Record @blkaddr for @dn and keep the extent cache in sync with it. */
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
chao yu | chao yu | 29 | 100.00% | 1 | 100.00% |
| Total | 29 | 100.00% | 1 | 100.00% |
/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (!count)
		return 0;

	/* inodes flagged FI_NO_ALLOC must not receive new blocks */
	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	/* inc_valid_block_count() may trim @count down to what fits */
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	/* mark NEW_ADDR in each unallocated slot until @count is consumed */
	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 85 | 50.30% | 5 | 71.43% |
chao yu | chao yu | 84 | 49.70% | 2 | 28.57% |
| Total | 169 | 100.00% | 7 | 100.00% |
/* Should keep dn->ofs_in_node unchanged */
/* Reserve a single new block without disturbing dn->ofs_in_node. */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int saved_ofs = dn->ofs_in_node;
	int err = reserve_new_blocks(dn, 1);

	/* reserve_new_blocks() advances ofs_in_node; restore it for the caller */
	dn->ofs_in_node = saved_ofs;
	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
chao yu | chao yu | 39 | 100.00% | 1 | 100.00% |
| Total | 39 | 100.00% | 1 | 100.00% |
/*
 * Look up the dnode covering @index and reserve a new block there when
 * none is allocated yet.
 */
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	/* if the caller supplied inode_page, it keeps ownership of the dnode */
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 74 | 100.00% | 5 | 100.00% |
| Total | 74 | 100.00% | 5 | 100.00% |
/*
 * Resolve the block address for @index: consult the extent cache first
 * and fall back to reserving a block on a cache miss.
 */
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct inode *inode = dn->inode;
	struct extent_info ei;

	if (!f2fs_lookup_extent_cache(inode, index, &ei))
		return f2fs_reserve_block(dn, index);

	dn->data_blkaddr = ei.blk + index - ei.fofs;
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
fan li | fan li | 33 | 50.77% | 1 | 25.00% |
jaegeuk kim | jaegeuk kim | 29 | 44.62% | 2 | 50.00% |
chao yu | chao yu | 3 | 4.62% | 1 | 25.00% |
| Total | 65 | 100.00% | 4 | 100.00% |
/*
 * Grab the data page at @index and start a read for it when it is not
 * already up to date.  When a read is submitted, the page is returned
 * still locked; callers wait on the page lock for completion.
 */
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.encrypted_page = NULL,
	};

	/* encrypted regular files go through the generic read path */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	/* an extent cache hit avoids the dnode lookup entirely */
	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		/* hole: callers need to know the page does not exist */
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such the case, its blkaddr can be remained as NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 269 | 81.76% | 13 | 65.00% |
chao yu | chao yu | 51 | 15.50% | 5 | 25.00% |
michael christie | michael christie | 8 | 2.43% | 1 | 5.00% |
kirill a. shutemov | kirill a. shutemov | 1 | 0.30% | 1 | 5.00% |
| Total | 329 | 100.00% | 20 | 100.00% |
/*
 * Return the up-to-date data page at @index without keeping it locked,
 * issuing and waiting for a read when needed.  ERR_PTR(-EIO) when the
 * read failed to bring the page up to date.
 */
struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, 0, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	/* a read was submitted; the end_io unlock signals its completion */
	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 79 | 63.20% | 6 | 60.00% |
chao yu | chao yu | 45 | 36.00% | 3 | 30.00% |
christoph hellwig | christoph hellwig | 1 | 0.80% | 1 | 10.00% |
| Total | 125 | 100.00% | 10 | 100.00% |
/*
* If it tries to access a hole, return an error.
* Because, the callers, functions in dir.c and GC, should be able to know
* whether this page exists or not.
*/
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, 0, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		/* the page was truncated or evicted while unlocked; retry */
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 58 | 50.00% | 7 | 58.33% |
chao yu | chao yu | 57 | 49.14% | 4 | 33.33% |
christoph hellwig | christoph hellwig | 1 | 0.86% | 1 | 8.33% |
| Total | 116 | 100.00% | 12 | 100.00% |
/*
* Caller ensures that this data page is never allocated.
* A new zero-filled data page is allocated in the page cache.
*
* Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
* f2fs_unlock_op().
* Note that, ipage is set only by make_empty_dir, and if any error occur,
* ipage should be released by this function.
*/
struct page *get_new_data_page(struct inode *inode,
struct page *ipage, pgoff_t index, bool new_i_size)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
struct dnode_of_data dn;
int err;
page = f2fs_grab_cache_page(mapping, index, true);
if (!page) {
/*
* before exiting, we should make sure ipage will be released
* if any error occur.
*/
f2fs_put_page(ipage, 1);
return ERR_PTR(-ENOMEM);
}
set_new_dnode(&dn, inode, ipage, NULL, 0);
err = f2fs_reserve_block(&dn, index);
if (err) {
f2fs_put_page(page, 1);
return ERR_PTR(err);
}
if (