static int f2fs_gc_range(struct f2fs_sb_info *sbi,
		unsigned int start_seg, unsigned int end_seg,
		bool dry_run, unsigned int dry_run_sections)
{
	unsigned int segno;
	unsigned int gc_secs = dry_run_sections;

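+	/* give up immediately if the checkpoint is in an error state */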
+	if (unlikely(f2fs_cp_error(sbi)))
+		return -EIO;
+
	for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

static int __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
					bool new_sec, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int old_segno;
+	int err = 0;

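+	/* a pinned curseg that was never initialized must allocate a new segment */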
	if (type == CURSEG_COLD_DATA_PINNED && !curseg->inited)
		goto allocate;

	if (!force && curseg->inited &&
	    !curseg->next_blkoff &&
	    !get_valid_blocks(sbi, curseg->segno, new_sec) &&
	    !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
		return 0;

allocate:
	old_segno = curseg->segno;
-	if (new_curseg(sbi, type, true))
-		return -EAGAIN;
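+	/* hand back new_curseg()'s actual error instead of assuming -EAGAIN */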
+	err = new_curseg(sbi, type, true);
+	if (err)
+		return err;
	stat_inc_seg_type(sbi, curseg);
	locate_dirty_segment(sbi, old_segno);
	return 0;
}

int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi)
{
	int err;
	bool gc_required = true;

retry:
	f2fs_lock_op(sbi);
	err = f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
	f2fs_unlock_op(sbi);

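+	/*
+	 * On zoned storage the pinned section may simply be unavailable for
+	 * now (-EAGAIN): run one round of foreground GC, and retry only if
+	 * that GC pass itself succeeded.
+	 */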
-	if (f2fs_sb_has_blkzoned(sbi) && err && gc_required) {
+	if (f2fs_sb_has_blkzoned(sbi) && err == -EAGAIN && gc_required) {
		f2fs_down_write(&sbi->gc_lock);
-		f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk), true, 1);
+		err = f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk), true, 1);
		f2fs_up_write(&sbi->gc_lock);

		gc_required = false;
-		goto retry;
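+		/* retry the allocation only if GC actually reclaimed a section */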
+		if (!err)
+			goto retry;
	}

	return err;
}