This patch adds support for injecting faults into f2fs_kmem_cache_alloc().
Usage:
a) echo 32768 > /sys/fs/f2fs/<dev>/inject_type or
b) mount -o fault_type=32768 <dev> <mountpoint>
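Note that, as with the other fault types, nothing is injected until a
non-zero injection rate is configured as well, e.g. (using the existing
inject_rate sysfs knob or the fault_injection mount option):
   echo 1 > /sys/fs/f2fs/<dev>/inject_rate
or
   mount -o fault_injection=1,fault_type=32768 <dev> <mountpoint>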
Signed-off-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
FAULT_CHECKPOINT 0x000001000
FAULT_DISCARD 0x000002000
FAULT_WRITE_IO 0x000004000
+ FAULT_SLAB_ALLOC 0x000008000
=================== ===========
mode=%s Control block allocation mode which supports "adaptive"
and "lfs". In "lfs" mode, there should be no random
retry:
if (!e)
- new = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);
+ new = f2fs_kmem_cache_alloc(ino_entry_slab,
+ GFP_NOFS, true, NULL);
radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
unsigned int size = sizeof(struct page *) * nr;
if (likely(size <= sbi->page_array_slab_size))
- return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
+ return f2fs_kmem_cache_alloc(sbi->page_array_slab,
+ GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
return f2fs_kzalloc(sbi, size, GFP_NOFS);
}
fio.version = ni.version;
- cic = kmem_cache_zalloc(cic_entry_slab, GFP_NOFS);
+ cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
if (!cic)
goto out_put_dnode;
pgoff_t start_idx = start_idx_of_cluster(cc);
int i;
- dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
+ dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO,
+ false, F2FS_I_SB(cc->inode));
if (!dic)
return ERR_PTR(-ENOMEM);
struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
struct bio_entry *be;
- be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
+ be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS, true, NULL);
be->bio = bio;
bio_get(bio);
struct super_block *sb = dir->i_sb;
if (IS_CASEFOLDED(dir)) {
- fname->cf_name.name = kmem_cache_alloc(f2fs_cf_name_slab,
- GFP_NOFS);
+ fname->cf_name.name = f2fs_kmem_cache_alloc(f2fs_cf_name_slab,
+ GFP_NOFS, false, F2FS_SB(sb));
if (!fname->cf_name.name)
return -ENOMEM;
fname->cf_name.len = utf8_casefold(sb->s_encoding,
{
struct extent_node *en;
- en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
+ en = f2fs_kmem_cache_alloc(extent_node_slab, GFP_ATOMIC, false, sbi);
if (!en)
return NULL;
mutex_lock(&sbi->extent_tree_lock);
et = radix_tree_lookup(&sbi->extent_tree_root, ino);
if (!et) {
- et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
+ et = f2fs_kmem_cache_alloc(extent_tree_slab,
+ GFP_NOFS, true, NULL);
f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
memset(et, 0, sizeof(struct extent_tree));
et->ino = ino;
FAULT_CHECKPOINT,
FAULT_DISCARD,
FAULT_WRITE_IO,
+ FAULT_SLAB_ALLOC,
FAULT_MAX,
};
return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
}
-static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
+static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep,
gfp_t flags)
{
void *entry;
return entry;
}
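+/*
+ * Wrapper for kmem_cache_alloc(): if @nofail is true, fall back to the
+ * retrying nofail variant above; otherwise the allocation may be failed
+ * deliberately via FAULT_SLAB_ALLOC injection on @sbi, so callers must
+ * handle a NULL return.
+ */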
+static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
+ gfp_t flags, bool nofail, struct f2fs_sb_info *sbi)
+{
+ if (nofail)
+ return f2fs_kmem_cache_alloc_nofail(cachep, flags);
+
+ if (time_to_inject(sbi, FAULT_SLAB_ALLOC)) {
+ f2fs_show_injection_info(sbi, FAULT_SLAB_ALLOC);
+ return NULL;
+ }
+
+ return kmem_cache_alloc(cachep, flags);
+}
+
static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
{
if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
struct atgc_management *am = &sbi->am;
struct victim_entry *ve;
- ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS);
+ ve = f2fs_kmem_cache_alloc(victim_entry_slab,
+ GFP_NOFS, true, NULL);
ve->mtime = mtime;
ve->segno = segno;
iput(inode);
return;
}
- new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
+ new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
+ GFP_NOFS, true, NULL);
new_ie->inode = inode;
f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
return dst_page;
}
-static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
+static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
+ nid_t nid, bool no_fail)
{
struct nat_entry *new;
- if (no_fail)
- new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
- else
- new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
+ new = f2fs_kmem_cache_alloc(nat_entry_slab,
+ GFP_F2FS_ZERO, no_fail, sbi);
if (new) {
nat_set_nid(new, nid);
nat_reset_flag(new);
head = radix_tree_lookup(&nm_i->nat_set_root, set);
if (!head) {
- head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);
+ head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
+ GFP_NOFS, true, NULL);
INIT_LIST_HEAD(&head->entry_list);
INIT_LIST_HEAD(&head->set_list);
unsigned long flags;
unsigned int seq_id;
- fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);
+ fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
+ GFP_NOFS, true, NULL);
get_page(page);
fn->page = page;
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *new, *e;
- new = __alloc_nat_entry(nid, false);
+ new = __alloc_nat_entry(sbi, nid, false);
if (!new)
return;
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *e;
- struct nat_entry *new = __alloc_nat_entry(ni->nid, true);
+ struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);
down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, ni->nid);
if (unlikely(f2fs_check_nid_range(sbi, nid)))
return false;
- i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
+ i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
i->nid = nid;
i->state = FREE_NID;
ne = __lookup_nat_cache(nm_i, nid);
if (!ne) {
- ne = __alloc_nat_entry(nid, true);
+ ne = __alloc_nat_entry(sbi, nid, true);
__init_nat_entry(nm_i, ne, &raw_ne, true);
}
goto err_out;
}
- entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
+ entry = f2fs_kmem_cache_alloc(fsync_entry_slab,
+ GFP_F2FS_ZERO, true, NULL);
entry->inode = inode;
list_add_tail(&entry->list, head);
set_page_private_atomic(page);
- new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
+ new = f2fs_kmem_cache_alloc(inmem_entry_slab,
+ GFP_NOFS, true, NULL);
/* add atomic page indices to the list */
new->page = page;
pend_list = &dcc->pend_list[plist_idx(len)];
- dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
+ dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL);
INIT_LIST_HEAD(&dc->list);
dc->bdev = bdev;
dc->lstart = lstart;
if (!de) {
de = f2fs_kmem_cache_alloc(discard_entry_slab,
- GFP_F2FS_ZERO);
+ GFP_F2FS_ZERO, true, NULL);
de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
list_add_tail(&de->list, head);
}
static struct sit_entry_set *grab_sit_entry_set(void)
{
struct sit_entry_set *ses =
- f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
+ f2fs_kmem_cache_alloc(sit_entry_set_slab,
+ GFP_NOFS, true, NULL);
ses->entry_cnt = 0;
INIT_LIST_HEAD(&ses->set_list);
[FAULT_CHECKPOINT] = "checkpoint error",
[FAULT_DISCARD] = "discard error",
[FAULT_WRITE_IO] = "write IO error",
+ [FAULT_SLAB_ALLOC] = "slab alloc",
};
void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
{
struct f2fs_inode_info *fi;
- fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
+ fi = f2fs_kmem_cache_alloc(f2fs_inode_cachep,
+ GFP_F2FS_ZERO, false, F2FS_SB(sb));
if (!fi)
return NULL;
{
if (likely(size == sbi->inline_xattr_slab_size)) {
*is_inline = true;
- return kmem_cache_zalloc(sbi->inline_xattr_slab, GFP_NOFS);
+ return f2fs_kmem_cache_alloc(sbi->inline_xattr_slab,
+ GFP_F2FS_ZERO, false, sbi);
}
*is_inline = false;
return f2fs_kzalloc(sbi, size, GFP_NOFS);
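For reference, a new call site would follow the same pattern as the
conversions above; a minimal sketch (foo_entry_slab and the surrounding
error path are hypothetical):

	struct foo_entry *fe;

	/* failable allocation: pass sbi so FAULT_SLAB_ALLOC can trigger */
	fe = f2fs_kmem_cache_alloc(foo_entry_slab, GFP_NOFS, false, sbi);
	if (!fe)
		return -ENOMEM;

	/* nofail allocation: retries until it succeeds, so sbi may be NULL */
	fe = f2fs_kmem_cache_alloc(foo_entry_slab, GFP_NOFS, true, NULL);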