return 0;
}
+/*
+ * wait_event() condition for the BTREE_INSERT_NEED_JOURNAL_RECLAIM path:
+ * nonzero means "stop waiting".
+ *
+ * Returns:
+ *   < 0 - journal (or reclaim) error; caller must abort
+ *   > 0 - key cache no longer requires waiting; caller may retry the commit
+ *     0 - still blocked; keep sleeping until woken by reclaim progress
+ */
+static int journal_reclaim_wait_done(struct bch_fs *c)
+{
+ int ret;
+
+ ret = bch2_journal_error(&c->journal);
+ if (ret)
+ return ret;
+
+ ret = !bch2_btree_key_cache_must_wait(c);
+ if (ret)
+ return ret;
+
+ /*
+ * Opportunistically run reclaim ourselves if no one else holds the
+ * lock; on trylock failure we simply re-check below and rely on the
+ * current reclaim holder to wake us via c->journal.reclaim_wait.
+ */
+ if (mutex_trylock(&c->journal.reclaim_lock)) {
+ ret = bch2_journal_reclaim(&c->journal);
+ mutex_unlock(&c->journal.reclaim_lock);
+ }
+
+ /* ret == 0 here unless reclaim itself errored; re-check the cache: */
+ if (!ret)
+ ret = !bch2_btree_key_cache_must_wait(c);
+ return ret;
+}
+
static noinline
int bch2_trans_commit_error(struct btree_trans *trans,
struct btree_insert_entry *i,
case BTREE_INSERT_NEED_JOURNAL_RECLAIM:
bch2_trans_unlock(trans);
- do {
- mutex_lock(&c->journal.reclaim_lock);
- ret = bch2_journal_reclaim(&c->journal);
- mutex_unlock(&c->journal.reclaim_lock);
- } while (!ret && bch2_btree_key_cache_must_wait(c));
+ wait_event(c->journal.reclaim_wait,
+ (ret = journal_reclaim_wait_done(c)));
+ if (ret < 0)
+ return ret;
- if (!ret && bch2_trans_relock(trans))
+ if (bch2_trans_relock(trans))
return 0;
trace_trans_restart_journal_reclaim(trans->ip);
spin_lock_init(&j->err_lock);
init_waitqueue_head(&j->wait);
INIT_DELAYED_WORK(&j->write_work, journal_write_work);
+ init_waitqueue_head(&j->reclaim_wait);
init_waitqueue_head(&j->pin_flush_wait);
mutex_init(&j->reclaim_lock);
mutex_init(&j->discard_lock);
min_nr = max(min_nr, bch2_nr_btree_keys_want_flush(c));
+ /* Don't do too many without delivering wakeup: */
+ min_nr = min(min_nr, 128UL);
+
trace_journal_reclaim_start(c,
min_nr,
j->prereserved.reserved,
else
j->nr_background_reclaim += nr_flushed;
trace_journal_reclaim_finish(c, nr_flushed);
+
+ if (nr_flushed)
+ wake_up(&j->reclaim_wait);
} while (min_nr && nr_flushed);
memalloc_noreclaim_restore(flags);