Release 4.10 fs/ext4/extents.c
/*
* Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
* Written by Alex Tomas <alex@clusterfs.com>
*
* Architecture independence:
* Copyright (c) 2005, Bull S.A.
* Written by Pierre Peiffer <pierre.peiffer@bull.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
/*
* Extents support for EXT4
*
* TODO:
* - ext4*_error() should be used in some situations
* - analyze all BUG()/BUG_ON(), use -EIO where appropriate
* - smart tree reduction
*/
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/backing-dev.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"
#include <trace/events/ext4.h>
/*
* used by extent splitting.
*/
#define EXT4_EXT_MAY_ZEROOUT	0x1	/* safe to zeroout if split fails due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1	0x2	/* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2	0x4	/* mark second half unwritten */
#define EXT4_EXT_DATA_VALID1	0x8	/* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10	/* second half contains valid data */
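For illustration only, a minimal user-space sketch (not part of the kernel sources) of how a caller splitting an unwritten extent would typically combine these flags, keeping both halves unwritten and allowing a zeroout fallback on ENOSPC; the macro values simply mirror the definitions above:
#include <stdio.h>

#define MAY_ZEROOUT	0x1
#define MARK_UNWRIT1	0x2
#define MARK_UNWRIT2	0x4

/* Mirrors the choice made later in ext4_force_split_extent_at(): an
 * unwritten extent keeps both halves unwritten after the split. */
static int choose_split_flags(int extent_is_unwritten, int may_zeroout)
{
	int flags = 0;

	if (extent_is_unwritten)
		flags |= MARK_UNWRIT1 | MARK_UNWRIT2;
	if (may_zeroout)
		flags |= MAY_ZEROOUT;
	return flags;
}

int main(void)
{
	printf("split_flag = %#x\n", choose_split_flags(1, 1));	/* 0x7 */
	return 0;
}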
static __le32 ext4_extent_block_csum(struct inode *inode,
struct ext4_extent_header *eh)
{
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
__u32 csum;
csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
EXT4_EXTENT_TAIL_OFFSET(eh));
return cpu_to_le32(csum);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| darrick j. wong | 69 | 100.00% | 1 | 100.00% |
| Total | 69 | 100.00% | 1 | 100.00% |
static int ext4_extent_block_csum_verify(struct inode *inode,
struct ext4_extent_header *eh)
{
struct ext4_extent_tail *et;
if (!ext4_has_metadata_csum(inode->i_sb))
return 1;
et = find_ext4_extent_tail(eh);
if (et->et_checksum != ext4_extent_block_csum(inode, eh))
return 0;
return 1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| darrick j. wong | 59 | 98.33% | 1 | 50.00% |
| dmitriy monakhov | 1 | 1.67% | 1 | 50.00% |
| Total | 60 | 100.00% | 2 | 100.00% |
static void ext4_extent_block_csum_set(struct inode *inode,
struct ext4_extent_header *eh)
{
struct ext4_extent_tail *et;
if (!ext4_has_metadata_csum(inode->i_sb))
return;
et = find_ext4_extent_tail(eh);
et->et_checksum = ext4_extent_block_csum(inode, eh);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| darrick j. wong | 49 | 98.00% | 1 | 50.00% |
| dmitriy monakhov | 1 | 2.00% | 1 | 50.00% |
| Total | 50 | 100.00% | 2 | 100.00% |
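The three helpers above compute, verify and store a checksum over an extent tree block. As a rough illustration of the layout involved, here is a minimal user-space sketch (the struct names are stand-ins, not the kernel's, and the 4KiB block size is an assumption): the checksum covers the header plus all eh_max entry slots, and the 4-byte tail carrying et_checksum sits immediately after them, which for a 4KiB block works out to exactly the last 4 bytes:
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct hdr  { uint16_t magic, entries, max, depth; uint32_t generation; };		/* 12 bytes */
struct slot { uint32_t ee_block; uint16_t ee_len, ee_start_hi; uint32_t ee_start_lo; };	/* 12 bytes */

int main(void)
{
	size_t blocksize = 4096;	/* assumed block size */
	size_t eh_max = (blocksize - sizeof(struct hdr)) / sizeof(struct slot);	/* 340 */
	size_t tail_offset = sizeof(struct hdr) + eh_max * sizeof(struct slot);	/* 4092 */

	/* The checksum covers bytes [0, tail_offset); the tail with
	 * et_checksum occupies the remaining bytes of the block. */
	printf("eh_max=%zu, checksummed bytes=%zu, tail bytes=%zu\n",
	       eh_max, tail_offset, blocksize - tail_offset);
	return 0;
}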
static int ext4_split_extent(handle_t *handle,
struct inode *inode,
struct ext4_ext_path **ppath,
struct ext4_map_blocks *map,
int split_flag,
int flags);
static int ext4_split_extent_at(handle_t *handle,
struct inode *inode,
struct ext4_ext_path **ppath,
ext4_lblk_t split,
int split_flag,
int flags);
static int ext4_find_delayed_extent(struct inode *inode,
struct extent_status *newes);
static int ext4_ext_truncate_extend_restart(handle_t *handle,
struct inode *inode,
int needed)
{
int err;
if (!ext4_handle_valid(handle))
return 0;
if (handle->h_buffer_credits >= needed)
return 0;
/*
* If we need to extend the journal, get a few extra blocks
* while we're at it for efficiency's sake.
*/
needed += 3;
err = ext4_journal_extend(handle, needed - handle->h_buffer_credits);
if (err <= 0)
return err;
err = ext4_truncate_restart_trans(handle, inode, needed);
if (err == 0)
err = -EAGAIN;
return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alex tomas | 38 | 40.00% | 1 | 14.29% |
| jan kara | 15 | 15.79% | 1 | 14.29% |
| theodore tso | 12 | 12.63% | 2 | 28.57% |
| frank mayhar | 11 | 11.58% | 1 | 14.29% |
| dmitriy monakhov | 10 | 10.53% | 1 | 14.29% |
| shen feng | 9 | 9.47% | 1 | 14.29% |
| Total | 95 | 100.00% | 7 | 100.00% |
/*
* could return:
* - EROFS
* - ENOMEM
*/
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path)
{
if (path->p_bh) {
/* path points to block */
BUFFER_TRACE(path->p_bh, "get_write_access");
return ext4_journal_get_write_access(handle, path->p_bh);
}
/* path points to leaf/index in inode body */
/* we use in-core data, no need to protect them */
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alex tomas | 44 | 83.02% | 1 | 50.00% |
| liang xie | 9 | 16.98% | 1 | 50.00% |
| Total | 53 | 100.00% | 2 | 100.00% |
/*
* could return:
* - EROFS
* - ENOMEM
* - EIO
*/
int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
struct inode *inode, struct ext4_ext_path *path)
{
int err;
WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
if (path->p_bh) {
ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
/* path points to block */
err = __ext4_handle_dirty_metadata(where, line, handle,
inode, path->p_bh);
} else {
/* path points to leaf/index in inode body */
err = ext4_mark_inode_dirty(handle, inode);
}
return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alex tomas | 56 | 56.00% | 1 | 20.00% |
| theodore tso | 15 | 15.00% | 1 | 20.00% |
| dmitriy monakhov | 15 | 15.00% | 1 | 20.00% |
| darrick j. wong | 12 | 12.00% | 1 | 20.00% |
| frank mayhar | 2 | 2.00% | 1 | 20.00% |
| Total | 100 | 100.00% | 5 | 100.00% |
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
struct ext4_ext_path *path,
ext4_lblk_t block)
{
if (path) {
int depth = path->p_depth;
struct ext4_extent *ex;
/*
* Try to predict block placement assuming that we are
* filling in a file which will eventually be
* non-sparse --- i.e., in the case of libbfd writing
* an ELF object's sections out-of-order but in a way
* that eventually results in a contiguous object or
* executable file, or some database extending a table
* space file. However, this is actually somewhat
* non-ideal if we are writing a sparse file such as
* qemu or KVM writing a raw image file that is going
* to stay fairly sparse, since it will end up
* fragmenting the file system's free space. Maybe we
* should have some heuristics or some way to allow
* userspace to pass a hint to the file system,
* especially if the latter case turns out to be
* common.
*/
ex = path[depth].p_ext;
if (ex) {
ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
if (block > ext_block)
return ext_pblk + (block - ext_block);
else
return ext_pblk - (ext_block - block);
}
/* it looks like index is empty;
* try to find starting block from index itself */
if (path[depth].p_bh)
return path[depth].p_bh->b_blocknr;
}
/* OK. use inode's group */
return ext4_inode_to_goal_block(inode);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alex tomas | 73 | 59.35% | 2 | 18.18% |
| kazuya mio | 32 | 26.02% | 1 | 9.09% |
| yongqiang yang | 7 | 5.69% | 1 | 9.09% |
| avantika mathur | 4 | 3.25% | 1 | 9.09% |
| theodore tso | 3 | 2.44% | 2 | 18.18% |
| aneesh kumar | 1 | 0.81% | 1 | 9.09% |
| tao ma | 1 | 0.81% | 1 | 9.09% |
| eric sandeen | 1 | 0.81% | 1 | 9.09% |
| randy dunlap | 1 | 0.81% | 1 | 9.09% |
| Total | 123 | 100.00% | 11 | 100.00% |
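To make the goal heuristic of ext4_ext_find_goal() concrete, a standalone sketch with hypothetical numbers: if the nearest extent maps logical block 100 to physical block 5000, a request for logical 115 is steered to physical 5015 and a request for logical 90 to physical 4990:
#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the "ex != NULL" branch above. */
static uint64_t guess_goal(uint64_t ext_pblk, uint32_t ext_block, uint32_t block)
{
	return block > ext_block ? ext_pblk + (block - ext_block)
				 : ext_pblk - (ext_block - block);
}

int main(void)
{
	/* Hypothetical extent: logical 100 -> physical 5000. */
	printf("%llu\n", (unsigned long long)guess_goal(5000, 100, 115));	/* 5015 */
	printf("%llu\n", (unsigned long long)guess_goal(5000, 100, 90));	/* 4990 */
	return 0;
}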
/*
* Allocation for a meta data block
*/
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *ex, int *err, unsigned int flags)
{
ext4_fsblk_t goal, newblock;
goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
NULL, err);
return newblock;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alex tomas | 64 | 86.49% | 2 | 33.33% |
| allison henderson | 6 | 8.11% | 1 | 16.67% |
| theodore tso | 3 | 4.05% | 2 | 33.33% |
| aneesh kumar | 1 | 1.35% | 1 | 16.67% |
| Total | 74 | 100.00% | 6 | 100.00% |
static inline int ext4_ext_space_block(struct inode *inode, int check)
{
int size;
size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
if (!check && size > 6)
size = 6;
#endif
return size;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alex tomas | 53 | 86.89% | 1 | 25.00% |
| theodore tso | 4 | 6.56% | 1 | 25.00% |
| yongqiang yang | 3 | 4.92% | 1 | 25.00% |
| robert p. j. day | 1 | 1.64% | 1 | 25.00% |
| Total | 61 | 100.00% | 4 | 100.00% |
static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
int size;
size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
if (!check && size > 5)
size = 5;
#endif
return size;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alex tomas | 53 | 86.89% | 1 | 25.00% |
| theodore tso | 4 | 6.56% | 1 | 25.00% |
| yongqiang yang | 3 | 4.92% | 1 | 25.00% |
| robert p. j. day | 1 | 1.64% | 1 | 25.00% |
| Total | 61 | 100.00% | 4 | 100.00% |
static inline int ext4_ext_space_root(struct inode *inode, int check)
{
int size;
size = sizeof(EXT4_I(inode)->i_data);
size -= sizeof(struct ext4_extent_header);
size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
if (!check && size > 3)
size = 3;
#endif
return size;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alex tomas | 59 | 88.06% | 1 | 25.00% |
| theodore tso | 4 | 5.97% | 1 | 25.00% |
| yongqiang yang | 3 | 4.48% | 1 | 25.00% |
| robert p. j. day | 1 | 1.49% | 1 | 25.00% |
| Total | 67 | 100.00% | 4 | 100.00% |
static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
int size;
size = sizeof(EXT4_I(inode)->i_data);
size -= sizeof(struct ext4_extent_header);
size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
if (!check && size > 4)
size = 4;
#endif
return size;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alex tomas | 59 | 88.06% | 1 | 25.00% |
| theodore tso | 4 | 5.97% | 1 | 25.00% |
| yongqiang yang | 3 | 4.48% | 1 | 25.00% |
| robert p. j. day | 1 | 1.49% | 1 | 25.00% |
| Total | 67 | 100.00% | 4 | 100.00% |
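A quick worked example of the capacities these four helpers compute. This assumes a 4KiB block size, the 12-byte extent header, 12-byte extent and index entries, and the 60-byte in-inode i_data area; it is a standalone sketch, not kernel code:
#include <stdio.h>

int main(void)
{
	int blocksize = 4096;	/* assumed block size */
	int hdr = 12;		/* extent header size */
	int entry = 12;		/* extent entry == index entry size */
	int i_data = 60;	/* in-inode i_data area (15 * 4 bytes) */

	printf("entries per tree block: %d\n", (blocksize - hdr) / entry);	/* 340 */
	printf("entries in inode root : %d\n", (i_data - hdr) / entry);	/* 4 */
	return 0;
}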
static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
struct ext4_ext_path **ppath, ext4_lblk_t lblk,
int nofail)
{
struct ext4_ext_path *path = *ppath;
int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
(nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL:0));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| dmitriy monakhov | 72 | 86.75% | 1 | 50.00% |
| theodore tso | 11 | 13.25% | 1 | 50.00% |
| Total | 83 | 100.00% | 2 | 100.00% |
/*
* Calculate the number of metadata blocks needed
* to allocate @blocks
* Worst case is one block per extent
*/
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
struct ext4_inode_info *ei = EXT4_I(inode);
int idxs;
idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
/ sizeof(struct ext4_extent_idx));
/*
* If the new delayed allocation block is contiguous with the
* previous da block, it can share index blocks with the
* previous block, so we only need to allocate a new index
* block every idxs leaf blocks.  At idxs**2 blocks, we need
* an additional index block, and at idxs**3 blocks, yet
* another index block.
*/
if (ei->i_da_metadata_calc_len &&
ei->i_da_metadata_calc_last_lblock+1 == lblock) {
int num = 0;
if ((ei->i_da_metadata_calc_len % idxs) == 0)
num++;
if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
num++;
if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
num++;
ei->i_da_metadata_calc_len = 0;
} else
ei->i_da_metadata_calc_len++;
ei->i_da_metadata_calc_last_lblock++;
return num;
}
/*
* In the worst case we need a new set of index blocks at
* every level of the inode's extent tree.
*/
ei->i_da_metadata_calc_len = 1;
ei->i_da_metadata_calc_last_lblock = lblock;
return ext_depth(inode) + 1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| theodore tso | 128 | 75.29% | 2 | 50.00% |
| mingming cao | 37 | 21.76% | 1 | 25.00% |
| yongqiang yang | 5 | 2.94% | 1 | 25.00% |
| Total | 170 | 100.00% | 4 | 100.00% |
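A standalone sketch that roughly mirrors the counting logic above (hypothetical driver code, not the kernel's incremental bookkeeping in i_da_metadata_calc_len): with idxs entries per index block, a run of contiguous delayed blocks needs a new leaf index block every idxs blocks, another index block at every idxs**2 blocks, and yet another at every idxs**3:
#include <stdio.h>

/* How many new index blocks does the n-th contiguous delayed block need? */
static int new_index_blocks(long n, long idxs)
{
	int num = 0;

	if (n % idxs == 0)
		num++;
	if (n % (idxs * idxs) == 0)
		num++;
	if (n % (idxs * idxs * idxs) == 0)
		num++;
	return num;
}

int main(void)
{
	long idxs = 340;	/* (4096 - 12) / 12 for 4KiB blocks */

	printf("%d %d %d\n",
	       new_index_blocks(1, idxs),		/* 0 */
	       new_index_blocks(idxs, idxs),		/* 1 */
	       new_index_blocks(idxs * idxs, idxs));	/* 2 */
	return 0;
}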
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
int max;
if (depth == ext_depth(inode)) {
if (depth == 0)
max = ext4_ext_space_root(inode, 1);
else
max = ext4_ext_space_root_idx(inode, 1);
} else {
if (depth == 0)
max = ext4_ext_space_block(inode, 1);
else
max = ext4_ext_space_block_idx(inode, 1);
}
return max;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alex tomas | 76 | 90.48% | 1 | 50.00% |
| theodore tso | 8 | 9.52% | 1 | 50.00% |
| Total | 84 | 100.00% | 2 | 100.00% |
static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
ext4_fsblk_t block = ext4_ext_pblock(ext);
int len = ext4_ext_get_actual_len(ext);
ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
/*
* We allow neither:
* - zero length
* - overflow/wrap-around
*/
if (lblock + len <= lblock)
return 0;
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| aneesh kumar | 34 | 49.28% | 1 | 11.11% |
| theodore tso | 17 | 24.64% | 4 | 44.44% |
| eryu guan | 12 | 17.39% | 2 | 22.22% |
| vegard nossum | 4 | 5.80% | 1 | 11.11% |
| alex tomas | 2 | 2.90% | 1 | 11.11% |
| Total | 69 | 100.00% | 9 | 100.00% |
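The single comparison in ext4_valid_extent() rejects both degenerate cases at once. A standalone sketch showing why lblock + len <= lblock (in unsigned 32-bit arithmetic) catches a zero length as well as a wrap past the end of the logical-block space:
#include <stdio.h>
#include <stdint.h>

/* True only for a non-empty, non-wrapping logical range. */
static int extent_range_ok(uint32_t lblock, uint32_t len)
{
	return (uint32_t)(lblock + len) > lblock;
}

int main(void)
{
	printf("%d\n", extent_range_ok(100, 0));		/* 0: zero length */
	printf("%d\n", extent_range_ok(0xffffff00u, 512));	/* 0: wraps around */
	printf("%d\n", extent_range_ok(100, 8));		/* 1: valid */
	return 0;
}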
static int ext4_valid_extent_idx(struct inode *inode,
struct ext4_extent_idx *ext_idx)
{
ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| aneesh kumar | 27 | 69.23% | 1 | 25.00% |
| theodore tso | 12 | 30.77% | 3 | 75.00% |
| Total | 39 | 100.00% | 4 | 100.00% |
static int ext4_valid_extent_entries(struct inode *inode,
struct ext4_extent_header *eh,
int depth)
{
unsigned short entries;
if (eh->eh_entries == 0)
return 1;
entries = le16_to_cpu(eh->eh_entries);
if (depth == 0) {
/* leaf entries */
struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
ext4_fsblk_t pblock = 0;
ext4_lblk_t lblock = 0;
ext4_lblk_t prev = 0;
int len = 0;
while (entries) {
if (!ext4_valid_extent(inode, ext))
return 0;
/* Check for overlapping extents */
lblock = le32_to_cpu(ext->ee_block);
len = ext4_ext_get_actual_len(ext);
if ((lblock <= prev) && prev) {
pblock = ext4_ext_pblock(ext);
es->s_last_error_block = cpu_to_le64(pblock);
return 0;
}
ext++;
entries--;
prev = lblock + len - 1;
}
} else {
struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
while (entries) {
if (!ext4_valid_extent_idx(inode, ext_idx))
return 0;
ext_idx++;
entries--;
}
}
return 1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| aneesh kumar | 118 | 54.13% | 1 | 33.33% |
| eryu guan | 90 | 41.28% | 1 | 33.33% |
| yongqiang yang | 10 | 4.59% | 1 | 33.33% |
| Total | 218 | 100.00% | 3 | 100.00% |
static int __ext4_ext_check(const char *function, unsigned int line,
struct inode *inode, struct ext4_extent_header *eh,
int depth, ext4_fsblk_t pblk)
{
const char *error_msg;
int max = 0, err = -EFSCORRUPTED;
if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
error_msg = "invalid magic";
goto corrupted;
}
if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
error_msg = "unexpected eh_depth";
goto corrupted;
}
if (unlikely(eh->eh_max == 0)) {
error_msg = "invalid eh_max";
goto corrupted;
}
max = ext4_ext_max_entries(inode, depth);
if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
error_msg = "too large eh_max";
goto corrupted;
}
if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
error_msg = "invalid eh_entries";
goto corrupted;
}
if (!ext4_valid_extent_entries(inode, eh, depth)) {
error_msg = "invalid extent entries";
goto corrupted;
}
if (unlikely(depth > 32)) {
error_msg = "too large eh_depth";
goto corrupted;
}
/* Verify checksum on non-root extent tree nodes */
if (ext_depth(inode) != depth &&
!ext4_extent_block_csum_verify(inode, eh)) {
error_msg = "extent tree corrupted";
err = -EFSBADCRC;
goto corrupted;
}
return 0;
corrupted:
ext4_error_inode(inode, function, line, 0,
"pblk %llu bad header/extent: %s - magic %x, "
"entries %u, max %u(%u), depth %u(%u)",
(unsigned long long) pblk, error_msg,
le16_to_cpu(eh->eh_magic),
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
max, le16_to_cpu(eh->eh_depth), depth);
return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alex tomas | 200 | 66.01% | 1 | 12.50% |
| darrick j. wong | 38 | 12.54% | 2 | 25.00% |
| aneesh kumar | 24 | 7.92% | 1 | 12.50% |
| theodore tso | 23 | 7.59% | 3 | 37.50% |
| vegard nossum | 18 | 5.94% | 1 | 12.50% |
| Total | 303 | 100.00% | 8 | 100.00% |
#define ext4_ext_check(inode, eh, depth, pblk) \
__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))
int ext4_ext_check_inode(struct inode *inode)
{
return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| aneesh kumar | 26 | 92.86% | 1 | 50.00% |
| theodore tso | 2 | 7.14% | 1 | 50.00% |
| Total | 28 | 100.00% | 2 | 100.00% |
static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
struct inode *inode, ext4_fsblk_t pblk, int depth,
int flags)
{
struct buffer_head *bh;
int err;
bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
if (unlikely(!bh))
return ERR_PTR(-ENOMEM);
if (!bh_uptodate_or_lock(bh)) {
trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
err = bh_submit_read(bh);
if (err < 0)
goto errout;
}
if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
return bh;
err = __ext4_ext_check(function, line, inode,
ext_block_hdr(bh), depth, pblk);
if (err)
goto errout;
set_buffer_verified(bh);
/*
* If this is a leaf block, cache all of its entries
*/
if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
struct ext4_extent_header *eh = ext_block_hdr(bh);
struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
ext4_lblk_t prev = 0;
int i;
for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
unsigned int status = EXTENT_STATUS_WRITTEN;
ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
int len = ext4_ext_get_actual_len(ex);
if (prev && (prev != lblk))
ext4_es_cache_extent(inode, prev,
lblk - prev, ~0,
EXTENT_STATUS_HOLE);
if (ext4_ext_is_unwritten(ex))
status = EXTENT_STATUS_UNWRITTEN;
ext4_es_cache_extent(inode, lblk, len,
ext4_ext_pblock(ex), status);
prev = lblk + len;
}
}
return bh;
errout:
put_bh(bh);
return ERR_PTR(err);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| theodore tso | 259 | 81.19% | 4 | 57.14% |
| darrick j. wong | 54 | 16.93% | 1 | 14.29% |
| nikolay borisov | 5 | 1.57% | 1 | 14.29% |
| lukas czerner | 1 | 0.31% | 1 | 14.29% |
| Total | 319 | 100.00% | 7 | 100.00% |
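When a leaf block is read with caching enabled, __read_extent_tree_block() inserts every extent into the extent status tree and caches the gaps between consecutive extents as holes; a hole before the very first extent is not cached. A standalone sketch with hypothetical extents (logical 0-7 and 16-19) showing what would be cached:
#include <stdio.h>
#include <stdint.h>

struct ext { uint32_t lblk, len; };

int main(void)
{
	struct ext leaf[] = { { 0, 8 }, { 16, 4 } };	/* hypothetical leaf */
	uint32_t prev = 0;
	unsigned int i;

	for (i = 0; i < sizeof(leaf) / sizeof(leaf[0]); i++) {
		/* Gaps between extents become holes, as in the loop above. */
		if (prev && prev != leaf[i].lblk)
			printf("cache HOLE    %u..%u\n", prev, leaf[i].lblk - 1);
		printf("cache WRITTEN %u..%u\n", leaf[i].lblk,
		       leaf[i].lblk + leaf[i].len - 1);
		prev = leaf[i].lblk + leaf[i].len;
	}
	return 0;
}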
#define read_extent_tree_block(inode, pblk, depth, flags) \
__read_extent_tree_block(__func__, __LINE__, (inode), (pblk), \
(depth), (flags))
/*
* This function is called to cache a file's extent information in the
* extent status tree
*/
int ext4_ext_precache(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_ext_path *path = NULL;
struct buffer_head *bh;
int i = 0, depth, ret = 0;
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return 0; /* not an extent-mapped inode */
down_read(&ei->i_data_sem);
depth = ext_depth(inode);
path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
GFP_NOFS);
if (path == NULL) {
up_read(&ei->i_data_sem);
return -ENOMEM;
}
/* Don't cache anything if there are no external extent blocks */
if (depth == 0)
goto out;
path[0].p_hdr = ext_inode_hdr(inode);
ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
if (ret)
goto out;
path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
while (i >= 0) {
/*
* If this is a leaf block or we've reached the end of
* the index block, go up
*/
if ((i == depth) ||
path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
brelse(path[i].p_bh);
path[i].p_bh = NULL;
i--;
continue;
}
bh = read_extent_tree_block(inode,
ext4_idx_pblock(path[i].p_idx++),
depth - i - 1,
EXT4_EX_FORCE_CACHE);
if (IS_ERR(bh)) {
ret = PTR_ERR(bh);
break;
}
i++;
path[i].p_bh = bh;
path[i].p_hdr = ext_block_hdr(bh);
path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
}
ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
up_read(&ei->i_data_sem);
ext4_ext_drop_refs(path);
kfree(path);
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| theodore tso | 348 | 100.00% | 1 | 100.00% |
| Total | 348 | 100.00% | 1 | 100.00% |
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
int k, l = path->p_depth;
ext_debug("path:");
for (k = 0; k <= l; k++, path++) {
if (path->p_idx) {
ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block),
ext4_idx_pblock(path->p_idx));
} else if (path->p_ext) {
ext_debug(" %d:[%d]%d:%llu ",
le32_to_cpu(path->p_ext->ee_block),
ext4_ext_is_unwritten(path->p_ext),
ext4_ext_get_actual_len(path->p_ext),
ext4_ext_pblock(path->p_ext));
} else
ext_debug(" []");
}
ext_debug("\n");
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alex tomas | 120 | 90.91% | 1 | 16.67% |
| mingming cao | 8 | 6.06% | 2 | 33.33% |
| theodore tso | 2 | 1.52% | 1 | 16.67% |
| lukas czerner | 1 | 0.76% | 1 | 16.67% |
| amit arora | 1 | 0.76% | 1 | 16.67% |
| Total | 132 | 100.00% | 6 | 100.00% |
static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
int depth = ext_depth(inode