/* Release 4.10 fs/f2fs/checkpoint.c */
/*
* fs/f2fs/checkpoint.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
static struct kmem_cache *ino_entry_slab;
struct kmem_cache *inode_entry_slab;
/*
 * Stop further checkpointing after a fatal error: record CP_ERROR_FLAG in
 * the checkpoint state and force the superblock read-only.  When not called
 * from bio end-io context (@end_io == false), also flush the merged bios so
 * already-queued writes are submitted; end-io callers must skip the flush
 * (presumably to avoid recursing into bio submission — confirm at callers).
 */
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
{
	set_ckpt_flags(sbi, CP_ERROR_FLAG);
	sbi->sb->s_flags |= MS_RDONLY;
	if (!end_io)
		f2fs_flush_merged_bios(sbi);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jaegeuk kim | 38 | 100.00% | 1 | 100.00% |
| Total | 38 | 100.00% | 1 | 100.00% |
/*
 * Grab the meta page at @index from the page cache, guaranteeing success:
 * retry allocation until a locked cache page is obtained, wait for any
 * writeback on it to finish, then mark it uptodate.
 */
struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *meta_mapping = META_MAPPING(sbi);
	struct page *pg;

	for (;;) {
		pg = f2fs_grab_cache_page(meta_mapping, index, false);
		if (pg)
			break;
		/* allocation pressure: yield and try again */
		cond_resched();
	}

	f2fs_wait_on_page_writeback(pg, META, true);
	if (!PageUptodate(pg))
		SetPageUptodate(pg);
	return pg;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jaegeuk kim | 80 | 96.39% | 5 | 83.33% |
| gu zheng | 3 | 3.61% | 1 | 16.67% |
| Total | 83 | 100.00% | 6 | 100.00% |
/*
 * Read the meta block at @index synchronously into the meta page cache.
 * We guarantee no failure on the returned page: allocation and bio
 * submission are retried until the page is locked and read.  @is_meta
 * strips REQ_META from the request for non-meta-area blocks (see
 * get_tmp_page(), used during power-on recovery).
 */
static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
							bool is_meta)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = REQ_META | REQ_PRIO,
		/* meta pages are identity-mapped: page index == block address */
		.old_blkaddr = index,
		.new_blkaddr = index,
		.encrypted_page = NULL,
	};

	if (unlikely(!is_meta))
		fio.op_flags &= ~REQ_META;
repeat:
	page = f2fs_grab_cache_page(mapping, index, false);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	/* already read by a previous caller or readahead */
	if (PageUptodate(page))
		goto out;

	fio.page = page;

	/* submission failed: drop our locked reference and start over */
	if (f2fs_submit_page_bio(&fio)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	lock_page(page);
	/* page was detached from the mapping while we slept; retry */
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	/*
	 * if there is any IO error when accessing device, make our filesystem
	 * readonly and make sure do not write checkpoint with non-uptodate
	 * meta page.
	 */
	if (unlikely(!PageUptodate(page)))
		f2fs_stop_checkpoint(sbi, false);
out:
	return page;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jaegeuk kim | 154 | 74.76% | 11 | 68.75% |
| chao yu | 42 | 20.39% | 3 | 18.75% |
| michael christie | 7 | 3.40% | 1 | 6.25% |
| gu zheng | 3 | 1.46% | 1 | 6.25% |
| Total | 206 | 100.00% | 16 | 100.00% |
/* Read a block inside the meta area; never fails (see __get_meta_page()). */
struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, true);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
chao yu | chao yu | 25 | 100.00% | 1 | 100.00% |
| Total | 25 | 100.00% | 1 | 100.00% |
/*
 * For power-on recovery (POR) only: read a block through the meta mapping
 * without the REQ_META hint, since the address may lie outside the meta area.
 */
struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, false);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
chao yu | chao yu | 25 | 100.00% | 1 | 100.00% |
| Total | 25 | 100.00% | 1 | 100.00% |
/*
 * Check that @blkaddr lies inside the on-disk region that @type may
 * legitimately address.  An unknown @type is a programming error (BUG).
 */
bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
{
	switch (type) {
	case META_NAT:
		/* NAT addresses are generated internally; nothing to range-check */
		return true;
	case META_SIT:
		return blkaddr < SIT_BLK_CNT(sbi);
	case META_SSA:
		return blkaddr >= SM_I(sbi)->ssa_blkaddr &&
			blkaddr < MAIN_BLKADDR(sbi);
	case META_CP:
		return blkaddr >= __start_cp_addr(sbi) &&
			blkaddr < SIT_I(sbi)->sit_base_addr;
	case META_POR:
		return blkaddr >= MAIN_BLKADDR(sbi) &&
			blkaddr < MAX_BLKADDR(sbi);
	default:
		BUG();
	}
	return true;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
chao yu | chao yu | 126 | 94.03% | 4 | 66.67% |
jaegeuk kim | jaegeuk kim | 8 | 5.97% | 2 | 33.33% |
| Total | 134 | 100.00% | 6 | 100.00% |
/*
 * Readahead CP/NAT/SIT/SSA pages
 *
 * Queue reads for up to @nrpages meta blocks starting at @start, under one
 * blk plug so the bios can be merged.  NAT/SIT block numbers are translated
 * to their current on-disk copy; SSA/CP/POR numbers are used as-is.
 * @sync selects blocking-priority flags (REQ_META|REQ_PRIO) versus plain
 * readahead (REQ_RAHEAD).  Returns how many block numbers were walked,
 * stopping early at the first address that fails validation.
 */
int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
							int type, bool sync)
{
	struct page *page;
	block_t blkno = start;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
		.encrypted_page = NULL,
	};
	struct blk_plug plug;

	/* POR targets may be ordinary data blocks, so drop the meta hint */
	if (unlikely(type == META_POR))
		fio.op_flags &= ~REQ_META;

	blk_start_plug(&plug);
	for (; nrpages-- > 0; blkno++) {
		if (!is_valid_blkaddr(sbi, blkno, type))
			goto out;

		switch (type) {
		case META_NAT:
			/* wrap around past the last NAT block */
			if (unlikely(blkno >=
					NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
				blkno = 0;
			/* get nat block addr */
			fio.new_blkaddr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK);
			break;
		case META_SIT:
			/* get sit block addr */
			fio.new_blkaddr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			break;
		case META_SSA:
		case META_CP:
		case META_POR:
			fio.new_blkaddr = blkno;
			break;
		default:
			BUG();
		}

		page = f2fs_grab_cache_page(META_MAPPING(sbi),
						fio.new_blkaddr, false);
		/* best-effort readahead: skip blocks we cannot grab or already have */
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}

		fio.page = page;
		fio.old_blkaddr = fio.new_blkaddr;
		f2fs_submit_page_mbio(&fio);
		f2fs_put_page(page, 0);
	}
out:
	f2fs_submit_merged_bio(sbi, META, READ);
	blk_finish_plug(&plug);
	return blkno - start;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
chao yu | chao yu | 255 | 86.15% | 7 | 50.00% |
jaegeuk kim | jaegeuk kim | 33 | 11.15% | 5 | 35.71% |
michael christie | michael christie | 7 | 2.36% | 1 | 7.14% |
christoph hellwig | christoph hellwig | 1 | 0.34% | 1 | 7.14% |
| Total | 296 | 100.00% | 14 | 100.00% |
/*
 * Conditionally readahead a batch of POR meta pages starting at @index,
 * but only when that page is not already cached and uptodate.
 */
void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct page *page = find_get_page(META_MAPPING(sbi), index);
	bool need_readahead = (!page || !PageUptodate(page));

	/* drop the reference from find_get_page() (handles NULL) */
	f2fs_put_page(page, 0);

	if (need_readahead)
		ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
chao yu | chao yu | 73 | 98.65% | 2 | 66.67% |
jaegeuk kim | jaegeuk kim | 1 | 1.35% | 1 | 33.33% |
| Total | 74 | 100.00% | 3 | 100.00% |
/*
 * ->writepage for the meta mapping.  Writes one dirty meta page, or
 * redirties it (returning AOP_WRITEPAGE_ACTIVATE) when writing is not
 * currently allowed: during recovery, for reclaim of low summary blocks,
 * or after a checkpoint error.
 */
static int f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);

	trace_f2fs_writepage(page, META);

	/* recovery in progress: meta must not be rewritten yet */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	/* reclaim must not touch pages below the first summary block */
	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	write_meta_page(sbi, page);
	dec_page_count(sbi, F2FS_DIRTY_META);

	if (wbc->for_reclaim)
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, META, WRITE);

	unlock_page(page);

	/* a CP error raced with us: push out whatever was merged */
	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, META, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 99 | 63.87% | 10 | 71.43% |
chao yu | chao yu | 56 | 36.13% | 4 | 28.57% |
| Total | 155 | 100.00% | 14 | 100.00% |
/*
 * ->writepages for the meta mapping.  Batches dirty meta pages and writes
 * them under cp_mutex so the write does not race with a checkpoint.  Small
 * batches (or kupdate-driven writeback) are skipped to keep meta writes
 * clustered.
 */
static int f2fs_write_meta_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	long diff, written;

	/* collect a number of dirty meta pages and write together */
	if (wbc->for_kupdate ||
		get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, META);

	/* serialize against checkpoint; also skipped while mounting fails */
	mutex_lock(&sbi->cp_mutex);
	diff = nr_pages_to_write(sbi, META, wbc);
	written = sync_meta_pages(sbi, META, wbc->nr_to_write);
	mutex_unlock(&sbi->cp_mutex);
	/* account both what we wrote and the quota adjustment (diff) */
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
	trace_f2fs_writepages(mapping->host, wbc, META);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 135 | 85.99% | 5 | 83.33% |
yunlei he | yunlei he | 22 | 14.01% | 1 | 16.67% |
| Total | 157 | 100.00% | 6 | 100.00% |
/*
 * Write out up to @nr_to_write dirty meta pages under a blk plug and
 * return how many were written.  When a byte budget is given
 * (nr_to_write != LONG_MAX) only a physically contiguous run of page
 * indices is written (tracked via @prev), so the resulting IO stays
 * sequential; the scan stops at the first gap.
 */
long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
						long nr_to_write)
{
	struct address_space *mapping = META_MAPPING(sbi);
	pgoff_t index = 0, end = ULONG_MAX, prev = ULONG_MAX;
	struct pagevec pvec;
	long nwritten = 0;
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};
	struct blk_plug plug;

	pagevec_init(&pvec, 0);

	blk_start_plug(&plug);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (unlikely(nr_pages == 0))
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* seed the contiguity tracker from the first page found */
			if (prev == ULONG_MAX)
				prev = page->index - 1;
			/* budgeted write: stop at the first non-contiguous index */
			if (nr_to_write != LONG_MAX && page->index != prev + 1) {
				pagevec_release(&pvec);
				goto stop;
			}

			lock_page(page);

			/* page left this mapping while unlocked: skip it */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, META, true);

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			if (mapping->a_ops->writepage(page, &wbc)) {
				unlock_page(page);
				break;
			}
			nwritten++;
			prev = page->index;
			if (unlikely(nwritten >= nr_to_write))
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
stop:
	if (nwritten)
		f2fs_submit_merged_bio(sbi, type, WRITE);

	blk_finish_plug(&plug);

	return nwritten;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 304 | 89.41% | 6 | 54.55% |
chao yu | chao yu | 33 | 9.71% | 4 | 36.36% |
gu zheng | gu zheng | 3 | 0.88% | 1 | 9.09% |
| Total | 340 | 100.00% | 11 | 100.00% |
/*
 * ->set_page_dirty for the meta mapping.  Returns 1 if the page
 * transitioned to dirty (and the dirty-meta counter was bumped),
 * 0 if it was dirty already.
 */
static int f2fs_set_meta_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, META);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	/* already dirty: nothing to account */
	if (PageDirty(page))
		return 0;

	f2fs_set_page_dirty_nobuffers(page);
	inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
	SetPagePrivate(page);
	f2fs_trace_pid(page);
	return 1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 67 | 93.06% | 6 | 85.71% |
chao yu | chao yu | 5 | 6.94% | 1 | 14.29% |
| Total | 72 | 100.00% | 7 | 100.00% |
/* address_space operations for the meta inode (CP/NAT/SIT/SSA pages) */
const struct address_space_operations f2fs_meta_aops = {
	.writepage	= f2fs_write_meta_page,
	.writepages	= f2fs_write_meta_pages,
	.set_page_dirty	= f2fs_set_meta_page_dirty,
	.invalidatepage = f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
};
/*
 * Insert @ino into the per-type ino radix tree/list, unless it is already
 * present.  The entry is preallocated with __GFP_NOFAIL semantics and the
 * radix tree node pool is preloaded, so the insert under the spinlock can
 * only fail on a racing insert — in which case we retry.  ORPHAN_INO does
 * not bump ino_num here; that slot is reserved by acquire_orphan_inode().
 */
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e, *tmp;

	tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);
retry:
	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (!e) {
		e = tmp;
		/* lost a race against a concurrent insert: retry the lookup */
		if (radix_tree_insert(&im->ino_root, ino, e)) {
			spin_unlock(&im->ino_lock);
			radix_tree_preload_end();
			goto retry;
		}
		memset(e, 0, sizeof(struct ino_entry));
		e->ino = ino;

		list_add_tail(&e->list, &im->ino_list);
		if (type != ORPHAN_INO)
			im->ino_num++;
	}
	spin_unlock(&im->ino_lock);
	radix_tree_preload_end();

	/* entry already existed: free our unused preallocation */
	if (e != tmp)
		kmem_cache_free(ino_entry_slab, tmp);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 150 | 81.52% | 7 | 63.64% |
chao yu | chao yu | 21 | 11.41% | 2 | 18.18% |
gu zheng | gu zheng | 13 | 7.07% | 2 | 18.18% |
| Total | 184 | 100.00% | 11 | 100.00% |
/*
 * Remove @ino from the per-type ino set, if present, and free its entry.
 * Silently does nothing when the ino is not tracked.
 */
static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *entry;

	spin_lock(&im->ino_lock);
	entry = radix_tree_lookup(&im->ino_root, ino);
	if (!entry) {
		spin_unlock(&im->ino_lock);
		return;
	}

	list_del(&entry->list);
	radix_tree_delete(&im->ino_root, ino);
	im->ino_num--;
	spin_unlock(&im->ino_lock);
	kmem_cache_free(ino_entry_slab, entry);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 88 | 81.48% | 5 | 71.43% |
chao yu | chao yu | 19 | 17.59% | 1 | 14.29% |
gu zheng | gu zheng | 1 | 0.93% | 1 | 14.29% |
| Total | 108 | 100.00% | 7 | 100.00% |
/* Track a dirty-ino of the given @type (e.g. APPEND_INO/UPDATE_INO). */
void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* add new dirty ino entry into list */
	__add_ino_entry(sbi, ino, type);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 25 | 96.15% | 1 | 50.00% |
chao yu | chao yu | 1 | 3.85% | 1 | 50.00% |
| Total | 26 | 100.00% | 2 | 100.00% |
/* Stop tracking a dirty-ino of the given @type. */
void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* remove dirty ino entry from list */
	__remove_ino_entry(sbi, ino, type);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 25 | 96.15% | 1 | 50.00% |
chao yu | chao yu | 1 | 3.85% | 1 | 50.00% |
| Total | 26 | 100.00% | 2 | 100.00% |
/*
 * Test whether @ino is tracked in the set selected by @mode.
 * mode should be APPEND_INO or UPDATE_INO.
 */
bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
{
	struct inode_management *im = &sbi->im[mode];
	struct ino_entry *found;

	spin_lock(&im->ino_lock);
	found = radix_tree_lookup(&im->ino_root, ino);
	spin_unlock(&im->ino_lock);

	return found != NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 53 | 76.81% | 1 | 50.00% |
chao yu | chao yu | 16 | 23.19% | 1 | 50.00% |
| Total | 69 | 100.00% | 2 | 100.00% |
/*
 * Drop every tracked ino entry for APPEND_INO and UPDATE_INO; with @all,
 * also clear ORPHAN_INO.  Each type's list is emptied under its own lock.
 */
void release_ino_entry(struct f2fs_sb_info *sbi, bool all)
{
	struct ino_entry *e, *tmp;
	int i;

	for (i = all ? ORPHAN_INO: APPEND_INO; i <= UPDATE_INO; i++) {
		struct inode_management *im = &sbi->im[i];

		spin_lock(&im->ino_lock);
		list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
			list_del(&e->list);
			radix_tree_delete(&im->ino_root, e->ino);
			kmem_cache_free(ino_entry_slab, e);
			im->ino_num--;
		}
		spin_unlock(&im->ino_lock);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 98 | 84.48% | 3 | 60.00% |
chao yu | chao yu | 18 | 15.52% | 2 | 40.00% |
| Total | 116 | 100.00% | 5 | 100.00% |
/*
 * Reserve one orphan slot, bumping the orphan count.  Returns -ENOSPC when
 * the on-disk orphan capacity (max_orphans) is exhausted, or when fault
 * injection triggers; 0 on success.  Pair with release_orphan_inode() or
 * the eventual __remove_ino_entry() on the orphan list.
 */
int acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];
	int err = 0;

	spin_lock(&im->ino_lock);

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_ORPHAN)) {
		spin_unlock(&im->ino_lock);
		return -ENOSPC;
	}
#endif
	if (unlikely(im->ino_num >= sbi->max_orphans))
		err = -ENOSPC;
	else
		im->ino_num++;
	spin_unlock(&im->ino_lock);

	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 80 | 80.81% | 4 | 66.67% |
chao yu | chao yu | 19 | 19.19% | 2 | 33.33% |
| Total | 99 | 100.00% | 6 | 100.00% |
/* Give back an orphan slot previously reserved by acquire_orphan_inode(). */
void release_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	spin_lock(&im->ino_lock);
	/* underflow means a release without a matching acquire */
	f2fs_bug_on(sbi, im->ino_num == 0);
	im->ino_num--;
	spin_unlock(&im->ino_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 26 | 47.27% | 6 | 66.67% |
chao yu | chao yu | 22 | 40.00% | 2 | 22.22% |
russ w. knize | russ w. knize | 7 | 12.73% | 1 | 11.11% |
| Total | 55 | 100.00% | 9 | 100.00% |
/*
 * Record @inode as an orphan (slot already reserved via
 * acquire_orphan_inode()) and push its inode page to keep the on-disk
 * state in sync.
 */
void add_orphan_inode(struct inode *inode)
{
	/* add new orphan ino entry into list */
	__add_ino_entry(F2FS_I_SB(inode), inode->i_ino, ORPHAN_INO);
	update_inode_page(inode);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 26 | 86.67% | 5 | 83.33% |
chao yu | chao yu | 4 | 13.33% | 1 | 16.67% |
| Total | 30 | 100.00% | 6 | 100.00% |
/* Drop @ino from the orphan list (also decrements the orphan count). */
void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	/* remove orphan entry from orphan list */
	__remove_ino_entry(sbi, ino, ORPHAN_INO);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 23 | 100.00% | 3 | 100.00% |
| Total | 23 | 100.00% | 3 | 100.00% |
/*
 * Recover one orphan @ino found in the checkpoint's orphan blocks: drop its
 * link count and iput() it so eviction truncates its data, then verify the
 * node block was actually freed.  On any inconsistency, flag the fs for
 * fsck and fail.  Returns 0 on success or a negative errno.
 */
static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct node_info ni;
	int err = acquire_orphan_inode(sbi);

	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: orphan failed (ino=%x), run fsck to fix.",
				__func__, ino);
		return err;
	}

	__add_ino_entry(sbi, ino, ORPHAN_INO);

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode)) {
		/*
		 * there should be a bug that we can't find the entry
		 * to orphan inode.
		 */
		f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
		return PTR_ERR(inode);
	}

	clear_nlink(inode);

	/* truncate all the data during iput */
	iput(inode);

	get_node_info(sbi, ino, &ni);

	/* ENOMEM was fully retried in f2fs_evict_inode. */
	if (ni.blk_addr != NULL_ADDR) {
		/* node block still allocated after eviction: on-disk damage */
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: orphan failed (ino=%x), run fsck to fix.",
				__func__, ino);
		return -EIO;
	}
	__remove_ino_entry(sbi, ino, ORPHAN_INO);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 153 | 84.53% | 5 | 83.33% |
chao yu | chao yu | 28 | 15.47% | 1 | 16.67% |
| Total | 181 | 100.00% | 6 | 100.00% |
/*
 * Walk the checkpoint's orphan blocks (located just past the CP block and
 * its payload) and recover every listed inode.  No-op unless the checkpoint
 * has CP_ORPHAN_PRESENT_FLAG set; the flag is cleared once all orphans are
 * processed.  Returns 0 or the first recovery error.
 */
int recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blocks, i, j;
	int err;

	if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
		return 0;

	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);

	/* warm the cache before the synchronous per-block reads below */
	ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);

	for (i = 0; i < orphan_blocks; i++) {
		struct page *page = get_meta_page(sbi, start_blk + i);
		struct f2fs_orphan_block *orphan_blk;

		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
			err = recover_orphan_inode(sbi, ino);
			if (err) {
				f2fs_put_page(page, 1);
				return err;
			}
		}
		f2fs_put_page(page, 1);
	}
	/* clear Orphan Flag */
	clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jaegeuk kim | jaegeuk kim | 144 | 72.73% | 2 | 25.00% |
chao yu | chao yu | 40 | 20.20% | 3 | 37.50% |
wanpeng li | wanpeng li | 10 | 5.05% | 2 | 25.00% |
changman lee | changman lee | 4 | 2.02% | 1 | 12.50% |
| Total | 198 | 100.00% | 8 | 100.00% |
static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
struct list_head *head;
struct f2fs_orphan_block *orphan_blk = NULL;
unsigned int nentries = 0;
unsigned short index = 1;
unsigned short orphan_blocks;
struct page *page = NULL;
struct ino_entry *orphan = NULL;
struct inode_management *im = &sbi->im[ORPHAN_INO];
orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);
/*
* we don't need to do spin_lock(&im->ino_lock) here, since all the
* orphan inode operations are covered under f2fs_lock_op().
* And, spin_lock should be avoided due to page operations below.
*/
head = &im->ino_list;
/* loop for each orphan inode entry and write them in Jornal block */
list_for_each_entry(orphan, head, list) {
if (!page) {
page = grab_meta_page(sbi, start_blk++);
orphan_blk =
(struct f2fs_orphan_block *)page_address(page);
memset(orphan_blk, 0, sizeof(*orphan_blk));
}
orphan_blk->ino[nentries++]