Release 4.10 fs/f2fs/segment.c
/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "trace.h"
#include <trace/events/f2fs.h>
#define __reverse_ffz(x) __reverse_ffs(~(x))
static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *bio_entry_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;
static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jaegeuk kim | 67 | 100.00% | 1 | 100.00% |
| Total | 67 | 100.00% | 1 | 100.00% |
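To see what __reverse_ulong computes, here is a minimal user-space sketch (reverse_ulong is a hypothetical stand-alone copy, assuming a 64-bit long): the first byte of the bitmap lands in the most significant byte of the result, so f2fs's "bit 0 is the MSB of byte 0" convention maps to the MSB of the word.

#include <stdio.h>

/* illustrative user-space copy of __reverse_ulong (64-bit long assumed) */
static unsigned long reverse_ulong(const unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 56, idx = 0;

	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= 8;
	}
	return tmp;
}

int main(void)
{
	unsigned char buf[8] = { 0x80, 0, 0, 0, 0, 0, 0, 0x01 };

	/* prints 8000000000000001: byte 0 became the top byte */
	printf("%016lx\n", reverse_ulong(buf));
	return 0;
}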
/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| changman lee | 130 | 95.59% | 1 | 50.00% |
| jaegeuk kim | 6 | 4.41% | 1 | 50.00% |
| Total | 136 | 100.00% | 2 | 100.00% |
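Because the word handed to __reverse_ffs has already been byte-reversed by __reverse_ulong, the function in effect counts bit positions from the MSB downward. A quick user-space sanity check (rev_ffs is a condensed hypothetical copy, assuming a 64-bit long):

#include <assert.h>

/* condensed user-space copy of __reverse_ffs (64-bit long assumed) */
static int rev_ffs(unsigned long w)
{
	int n = 0;

	if (!(w & 0xffffffff00000000UL)) n += 32; else w >>= 32;
	if (!(w & 0xffff0000)) n += 16; else w >>= 16;
	if (!(w & 0xff00)) n += 8; else w >>= 8;
	if (!(w & 0xf0)) n += 4; else w >>= 4;
	if (!(w & 0xc)) n += 2; else w >>= 2;
	if (!(w & 0x2)) n += 1;
	return n;
}

int main(void)
{
	assert(rev_ffs(1UL << 63) == 0);	/* f2fs bit 0 */
	assert(rev_ffs(1UL << 0) == 63);	/* f2fs bit 63 */
	return 0;
}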
/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be an integral multiple of BITS_PER_LONG.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jaegeuk kim | 98 | 59.04% | 2 | 50.00% |
| fan li | 41 | 24.70% | 1 | 25.00% |
| changman lee | 27 | 16.27% | 1 | 25.00% |
| Total | 166 | 100.00% | 4 | 100.00% |
static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jaegeuk kim | 145 | 84.30% | 3 | 75.00% |
| changman lee | 27 | 15.70% | 1 | 25.00% |
| Total | 172 | 100.00% | 4 | 100.00% |
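The reversed layout that both scanners walk is easiest to see with a toy version of f2fs_set_bit. set_rev_bit below is a hypothetical user-space stand-in that follows the comment above: bit 0 lives in the MSB of byte 0, bit 7 in its LSB.

#include <stdio.h>

/* toy f2fs_set_bit(): bit 0 is the MSB of byte 0 (illustrative only) */
static void set_rev_bit(unsigned int nr, unsigned char *addr)
{
	addr[nr >> 3] |= 1 << (7 - (nr & 7));
}

int main(void)
{
	unsigned char bitmap[8] = { 0 };

	set_rev_bit(0, bitmap);		/* byte 0: 1000 0000 */
	set_rev_bit(7, bitmap);		/* byte 0: 1000 0001 */
	printf("%02x\n", bitmap[0]);	/* prints 81 */
	return 0;
}

A plain find_next_bit() would misread such a bitmap, which is why the byte-reversing variants above exist.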
void register_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *new;

	f2fs_trace_pid(page);

	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
	SetPagePrivate(page);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);

	/* increase reference count with clean state */
	mutex_lock(&fi->inmem_lock);
	get_page(page);
	list_add_tail(&new->list, &fi->inmem_pages);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&fi->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jaegeuk kim | 114 | 89.76% | 5 | 83.33% |
| chao yu | 13 | 10.24% | 1 | 16.67% |
| Total | 127 | 100.00% | 6 | 100.00% |
static int __revoke_inmem_pages(struct inode *inode,
				struct list_head *head, bool drop, bool recover)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inmem_pages *cur, *tmp;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, head, list) {
		struct page *page = cur->page;

		if (drop)
			trace_f2fs_commit_inmem_page(page, INMEM_DROP);

		lock_page(page);

		if (recover) {
			struct dnode_of_data dn;
			struct node_info ni;

			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			if (get_dnode_of_data(&dn, page->index, LOOKUP_NODE)) {
				err = -EAGAIN;
				goto next;
			}
			get_node_info(sbi, dn.nid, &ni);
			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					cur->old_addr, ni.version, true, true);
			f2fs_put_dnode(&dn);
		}
next:
		/* we don't need to invalidate this in the successful status */
		if (drop || recover)
			ClearPageUptodate(page);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		f2fs_put_page(page, 1);

		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| chao yu | 231 | 94.67% | 3 | 60.00% |
| jaegeuk kim | 13 | 5.33% | 2 | 40.00% |
| Total | 244 | 100.00% | 5 | 100.00% |
void drop_inmem_pages(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	clear_inode_flag(inode, FI_ATOMIC_FILE);

	mutex_lock(&fi->inmem_lock);
	__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	mutex_unlock(&fi->inmem_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| chao yu | 50 | 87.72% | 2 | 66.67% |
| jaegeuk kim | 7 | 12.28% | 1 | 33.33% |
| Total | 57 | 100.00% | 3 | 100.00% |
static int __commit_inmem_pages(struct inode *inode,
					struct list_head *revoke_list)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.encrypted_page = NULL,
	};
	bool submit_bio = false;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		struct page *page = cur->page;

		lock_page(page);
		if (page->mapping == inode->i_mapping) {
			trace_f2fs_commit_inmem_page(page, INMEM);

			set_page_dirty(page);
			f2fs_wait_on_page_writeback(page, DATA, true);
			if (clear_page_dirty_for_io(page)) {
				inode_dec_dirty_pages(inode);
				remove_dirty_inode(inode);
			}

			fio.page = page;
			err = do_write_data_page(&fio);
			if (err) {
				unlock_page(page);
				break;
			}

			/* record old blkaddr for revoking */
			cur->old_addr = fio.old_blkaddr;

			submit_bio = true;
		}
		unlock_page(page);
		list_move_tail(&cur->list, revoke_list);
	}

	if (submit_bio)
		f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);

	if (!err)
		__revoke_inmem_pages(inode, revoke_list, false, false);

	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jaegeuk kim | 171 | 66.54% | 11 | 61.11% |
| chao yu | 79 | 30.74% | 5 | 27.78% |
| michael christie | 6 | 2.33% | 1 | 5.56% |
| christoph hellwig | 1 | 0.39% | 1 | 5.56% |
| Total | 257 | 100.00% | 18 | 100.00% |
int commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct list_head revoke_list;
	int err;

	INIT_LIST_HEAD(&revoke_list);
	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	mutex_lock(&fi->inmem_lock);
	err = __commit_inmem_pages(inode, &revoke_list);
	if (err) {
		int ret;
		/*
		 * try to revoke all committed pages, but still we could fail
		 * due to no memory or other reason; if that happens, EAGAIN
		 * will be returned, which means the transaction is no longer
		 * intact and the caller should use the journal to recover or
		 * rewrite & commit the last transaction. For any other error
		 * number, revoking was done by the filesystem itself.
		 */
		ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
		if (ret)
			err = ret;

		/* drop all uncommitted pages */
		__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	}
	mutex_unlock(&fi->inmem_lock);

	f2fs_unlock_op(sbi);
	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| chao yu | 132 | 97.06% | 2 | 50.00% |
| jaegeuk kim | 4 | 2.94% | 2 | 50.00% |
| Total | 136 | 100.00% | 4 | 100.00% |
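For context, the in-memory page machinery above backs F2FS's atomic write ioctls: F2FS_IOC_START_ATOMIC_WRITE makes subsequent dirty pages queue through register_inmem_page(), and F2FS_IOC_COMMIT_ATOMIC_WRITE drains them through commit_inmem_pages(). A hedged user-space sketch of the lifecycle (the ioctl numbers mirror fs/f2fs/f2fs.h; atomic_update is a hypothetical helper):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define F2FS_IOCTL_MAGIC		0xf5
#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)

/* rewrite the head of a file so that either all or none of it lands */
int atomic_update(const char *path, const void *buf, size_t len)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	/* dirty pages are now queued in memory, not written back */
	if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) < 0)
		goto fail;
	if (pwrite(fd, buf, len, 0) != (ssize_t)len)
		goto fail;
	/* flush the queued pages in one transaction */
	if (ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE) < 0)
		goto fail;
	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}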
/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_CHECKPOINT))
		f2fs_stop_checkpoint(sbi, false);
#endif

	if (!need)
		return;

	/* balance_fs_bg may already have work pending */
	if (excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi);

	/*
	 * We should do GC or end up with checkpoint, if there are so many dirty
	 * dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi, false, false);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jaegeuk kim | 61 | 72.62% | 7 | 77.78% |
| chao yu | 23 | 27.38% | 2 | 22.22% |
| Total | 84 | 100.00% | 9 | 100.00% |
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* try to shrink extent cache when there is not enough memory */
	if (!available_free_memory(sbi, EXTENT_CACHE))
		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!available_free_memory(sbi, NAT_ENTRIES))
		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!available_free_memory(sbi, FREE_NIDS))
		try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		build_free_nids(sbi, false);

	if (!is_idle(sbi))
		return;

	/* checkpoint is the only way to shrink partial cached entries */
	if (!available_free_memory(sbi, NAT_ENTRIES) ||
			!available_free_memory(sbi, INO_ENTRIES) ||
			excess_prefree_segs(sbi) ||
			excess_dirty_nats(sbi) ||
			f2fs_time_over(sbi, CP_TIME)) {
		if (test_opt(sbi, DATA_FLUSH)) {
			struct blk_plug plug;

			blk_start_plug(&plug);
			sync_dirty_inodes(sbi, FILE_INODE);
			blk_finish_plug(&plug);
		}
		f2fs_sync_fs(sbi->sb, true);
		stat_inc_bg_cp_count(sbi->stat_info);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jaegeuk kim | 97 | 57.74% | 9 | 60.00% |
| chao yu | 71 | 42.26% | 6 | 40.00% |
| Total | 168 | 100.00% | 15 | 100.00% |
static int __submit_flush_wait(struct block_device *bdev)
{
	struct bio *bio = f2fs_bio_alloc(0);
	int ret;

	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	bio->bi_bdev = bdev;
	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jaegeuk kim | 48 | 90.57% | 1 | 50.00% |
| linus torvalds | 5 | 9.43% | 1 | 50.00% |
| Total | 53 | 100.00% | 2 | 100.00% |
static int submit_flush_wait(struct f2fs_sb_info *sbi)
{
	int ret = __submit_flush_wait(sbi->sb->s_bdev);
	int i;

	if (sbi->s_ndevs && !ret) {
		for (i = 1; i < sbi->s_ndevs; i++) {
			ret = __submit_flush_wait(FDEV(i).bdev);
			if (ret)
				break;
		}
	}
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jaegeuk kim | 74 | 100.00% | 1 | 100.00% |
| Total | 74 | 100.00% | 1 | 100.00% |
static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		ret = submit_flush_wait(sbi);
		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jaegeuk kim | 105 | 67.74% | 2 | 40.00% |
| gu zheng | 50 | 32.26% | 3 | 60.00% |
| Total | 155 | 100.00% | 5 | 100.00% |
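The issue_list handoff relies on the kernel's lock-free llist: producers push with llist_add() while the flush thread claims the whole pending batch at once with llist_del_all() (llist_reverse_order() then restores FIFO order). A minimal user-space analogue of the pattern, using C11 atomics and assuming a single consumer (all names here are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct cmd {
	struct cmd *next;
	int ret;
};

static _Atomic(struct cmd *) issue_list;

/* llist_add() analogue: lock-free push onto the stack */
static void push(struct cmd *c)
{
	c->next = atomic_load(&issue_list);
	while (!atomic_compare_exchange_weak(&issue_list, &c->next, c))
		;
}

/* llist_del_all() analogue: claim the entire pending batch */
static struct cmd *grab_all(void)
{
	return atomic_exchange(&issue_list, NULL);
}

int main(void)
{
	struct cmd a = { 0 }, b = { 0 };

	push(&a);
	push(&b);

	/* one flush result fans out to every queued waiter */
	for (struct cmd *c = grab_all(); c; c = c->next)
		c->ret = 0;

	printf("%d %d\n", a.ret, b.ret);
	return 0;
}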
int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
	struct flush_cmd cmd;

	trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
					test_opt(sbi, FLUSH_MERGE));

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE) || !atomic_read(&fcc->submit_flush)) {
		int ret;

		atomic_inc(&fcc->submit_flush);
		ret = submit_flush_wait(sbi);
		atomic_dec(&fcc->submit_flush);
		return ret;
	}

	init_completion(&cmd.wait);

	atomic_inc(&fcc->submit_flush);
	llist_add(&cmd.llnode, &fcc->issue_list);

	if (!fcc->dispatch_list)
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->submit_flush);
	} else {
		llist_del_all(&fcc->issue_list);
		atomic_set(&fcc->submit_flush, 0);
	}

	return cmd.ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jaegeuk kim | 186 | 91.63% | 7 | 63.64% |
| gu zheng | 13 | 6.40% | 3 | 27.27% |
| chao yu | 4 | 1.97% | 1 | 9.09% |
| Total | 203 | 100.00% | 11 | 100.00% |
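From user space this path is normally reached through fsync(): f2fs_do_sync_file() ends up calling f2fs_issue_flush(), so with the flush_merge mount option concurrent callers can share one REQ_PREFLUSH. A trivial sketch (sync_file is a hypothetical helper):

#include <fcntl.h>
#include <unistd.h>

/* fsync() on f2fs can be served by a single merged preflush when
 * several callers arrive while one flush is already in flight */
int sync_file(const char *path)
{
	int fd = open(path, O_WRONLY);
	int ret;

	if (fd < 0)
		return -1;
	ret = fsync(fd);
	close(fd);
	return ret;
}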
int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	if (SM_I(sbi)->cmd_control_info) {
		fcc = SM_I(sbi)->cmd_control_info;
		goto init_thread;
	}

	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->submit_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->cmd_control_info = fcc;
init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->cmd_control_info = NULL;
		return err;
	}

	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| gu zheng | 124 | 70.45% | 2 | 33.33% |
| jaegeuk kim | 35 | 19.89% | 2 | 33.33% |
| chao yu | 17 | 9.66% | 2 | 33.33% |
| Total | 176 | 100.00% | 6 | 100.00% |
void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;

	if (fcc && fcc->f2fs_issue_flush) {
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kfree(fcc);
		SM_I(sbi)->cmd_control_info = NULL;
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| gu zheng | 46 | 61.33% | 1 | 33.33% |
| jaegeuk kim | 23 | 30.67% | 1 | 33.33% |
| chao yu | 6 | 8.00% | 1 | 33.33% |
| Total | 75 | 100.00% | 3 | 100.00% |
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jaegeuk kim | 131 | 98.50% | 3 | 75.00% |
| changman lee | 2 | 1.50% | 1 | 25.00% |
| Total | 133 | 100.00% | 4 | 100.00% |
static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			clear_bit(GET_SECNO(sbi, segno),
						dirty_i->victim_secmap);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jaegeuk kim | 115 | 88.46% | 3 | 75.00% |
| changman lee | 15 | 11.54% | 1 | 25.00% |
| Total | 130 | 100.00% | 4 | 100.00% |
/*
 * Errors such as -ENOMEM should not occur here.
 * Adding a dirty entry to the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, 0);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jaegeuk kim | 128 | 99.22% | 1 | 50.00% |
| haicheng li | 1 | 0.78% | 1 | 50.00% |
| Total | 129 | 100.00% | 2 | 100.00% |
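In short, a segment's fate is decided purely by its valid block count; a toy classifier mirroring the branches above (classify and its parameters are illustrative assumptions, not kernel API):

/* toy version of the locate_dirty_segment() decision */
enum seg_state { SEG_PRE, SEG_DIRTY, SEG_FULL };

static enum seg_state classify(unsigned int valid_blocks,
				unsigned int blocks_per_seg)
{
	if (valid_blocks == 0)
		return SEG_PRE;		/* reusable after the next checkpoint */
	if (valid_blocks < blocks_per_seg)
		return SEG_DIRTY;	/* partially valid: cleaning candidate */
	return SEG_FULL;		/* fully valid: removed from DIRTY */
}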
static struct bio_entry *__add_bio_entry(struct f2fs_sb_info *sbi,
							struct bio *bio)
{
	struct list_head *wait_list = &(SM_I(sbi)->wait_list);
	struct bio_entry *be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);

	INIT_LIST_HEAD(&be->list);
	be->bio = bio;
	init_completion(&be->event);
	list_add_tail(&be->list, wait_list);

	return be;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| chao yu | 80 | 100.00% | 1 | 100.00% |
| Total | 80 | 100.00% | 1 | 100.00% |
void f2fs_wait_all_discard_bio(struct f2fs_sb_info *sbi)
{
	struct list_head *wait_list = &(SM_I(sbi)->wait_list);
	struct bio_entry *be, *tmp;

	list_for_each_entry_safe(be, tmp, wait_list, list) {
		struct bio *bio = be->bio;
		int err;

		wait_for_completion_io(&be->event);
		err = be->error;
		if (err == -EOPNOTSUPP)
			err = 0;

		if (err)
			f2fs_msg(sbi->sb, KERN_INFO,
				"Issue discard failed, ret: %d", err);

		bio_put(bio);
		list_del(&be->list);
		kmem_cache_free(bio_entry_slab, be);
	}
}