bcachefs: Separate discards from rest of journal reclaim
author	Kent Overstreet <kent.overstreet@gmail.com>
Sun, 3 Mar 2019 23:39:07 +0000 (18:39 -0500)
committer	Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:08:17 +0000 (17:08 -0400)
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/journal.c
fs/bcachefs/journal_reclaim.c
fs/bcachefs/journal_reclaim.h
fs/bcachefs/journal_types.h

diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 17add726f2acea37d86224f9f4c15cfa0cabc652..80d7980cf5aa5dc53387e592fd9b7ba8f546f88e 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -322,6 +322,7 @@ static int __journal_res_get(struct journal *j, struct journal_res *res,
 {
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf;
+       bool can_discard;
        int ret;
 retry:
        if (journal_res_get_fast(j, res, flags))
@@ -370,18 +371,28 @@ retry:
            !j->res_get_blocked_start)
                j->res_get_blocked_start = local_clock() ?: 1;
 
+       can_discard = j->can_discard;
        spin_unlock(&j->lock);
 
        if (!ret)
                goto retry;
+
        if (ret == -ENOSPC) {
                /*
                 * Journal is full - can't rely on reclaim from work item due to
                 * freezing:
                 */
                trace_journal_full(c);
-               if (!(flags & JOURNAL_RES_GET_NONBLOCK))
+
+               if (!(flags & JOURNAL_RES_GET_NONBLOCK)) {
+                       if (can_discard) {
+                               bch2_journal_do_discards(j);
+                               goto retry;
+                       }
+
                        bch2_journal_reclaim_work(&j->reclaim_work.work);
+               }
+
                ret = -EAGAIN;
        }
 
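The hunk above changes the -ENOSPC path so that a blocking caller snapshots j->can_discard while still holding the journal lock and, if any journal buckets can be discarded, issues the discards directly and retries, only falling back to the full reclaim work when nothing is discardable. Below is a minimal standalone sketch of that control flow; the types and helpers (toy_journal, try_reserve(), do_discards(), kick_reclaim()) are simplified stand-ins for illustration, not the bcachefs API.

	/*
	 * Sketch of the blocking reservation retry loop: try the fast path,
	 * then prefer cheap synchronous discards over full reclaim.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_journal {
		bool	can_discard;	/* mirrors journal->can_discard */
		int	free_entries;	/* space left in the toy journal */
	};

	static int try_reserve(struct toy_journal *j)
	{
		if (j->free_entries > 0) {
			j->free_entries--;
			return 0;
		}
		return -1;		/* stands in for -ENOSPC */
	}

	static void do_discards(struct toy_journal *j)
	{
		/* discarding no-longer-dirty buckets frees space immediately */
		j->free_entries += 2;
		j->can_discard = false;
	}

	static void kick_reclaim(struct toy_journal *j)
	{
		/* slower path: flush pins, advance last_seq, then free space */
		j->free_entries += 1;
	}

	static int res_get_blocking(struct toy_journal *j)
	{
	retry:
		if (!try_reserve(j))
			return 0;

		if (j->can_discard) {
			do_discards(j);		/* cheap, synchronous */
			goto retry;
		}

		kick_reclaim(j);		/* fall back to full reclaim */
		goto retry;
	}

	int main(void)
	{
		struct toy_journal j = { .can_discard = true, .free_entries = 0 };

		printf("reservation: %d\n", res_get_blocking(&j));
		return 0;
	}
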
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index 3a85fb8b852697006936da5930759aa568a02775..ac9e6cb3d4eef7674caa72d54010342225df767c 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -45,6 +45,7 @@ void bch2_journal_space_available(struct journal *j)
        unsigned unwritten_sectors = j->reservations.prev_buf_unwritten
                ? journal_prev_buf(j)->sectors
                : 0;
+       bool can_discard = false;
        int ret = 0;
 
        lockdep_assert_held(&j->lock);
@@ -65,9 +66,14 @@ void bch2_journal_space_available(struct journal *j)
                       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
                        ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
 
+               if (ja->discard_idx != ja->dirty_idx_ondisk)
+                       can_discard = true;
+
                nr_online++;
        }
 
+       j->can_discard = can_discard;
+
        if (nr_online < c->opts.metadata_replicas_required) {
                ret = -EROFS;
                sectors_next_entry = 0;
@@ -156,7 +162,7 @@ static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
  * Advance ja->discard_idx as long as it points to buckets that are no longer
  * dirty, issuing discards if necessary:
  */
-static void bch2_journal_do_discards(struct journal *j)
+void bch2_journal_do_discards(struct journal *j)
 {
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
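The journal_reclaim.c hunks above compute can_discard while bch2_journal_space_available() walks the journal devices: any device whose discard index lags its on-disk dirty index still has buckets that could be discarded, and bch2_journal_do_discards() is exported so callers outside reclaim can act on that. The following is a small sketch of that per-device check under simplified assumptions; struct toy_journal_dev is a stand-in for struct journal_device, keeping only the two ring indices that matter here.

	/*
	 * Sketch of the can_discard computation: true if any journal device
	 * has buckets between discard_idx and dirty_idx_ondisk.
	 */
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct toy_journal_dev {
		unsigned	discard_idx;		/* next bucket to discard */
		unsigned	dirty_idx_ondisk;	/* oldest bucket still dirty on disk */
	};

	static bool journal_can_discard(const struct toy_journal_dev *devs, size_t nr)
	{
		bool can_discard = false;

		for (size_t i = 0; i < nr; i++)
			if (devs[i].discard_idx != devs[i].dirty_idx_ondisk)
				can_discard = true;

		return can_discard;
	}

	int main(void)
	{
		struct toy_journal_dev devs[] = {
			{ .discard_idx = 3, .dirty_idx_ondisk = 3 },	/* nothing to discard */
			{ .discard_idx = 1, .dirty_idx_ondisk = 4 },	/* buckets 1..3 discardable */
		};

		printf("can_discard = %d\n", journal_can_discard(devs, 2));
		return 0;
	}
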
diff --git a/fs/bcachefs/journal_reclaim.h b/fs/bcachefs/journal_reclaim.h
index a9afb229541b39a681b011ed8a5243e7d829b254..183419ea3e2529b61db6254758c0f3fb12792d20 100644
--- a/fs/bcachefs/journal_reclaim.h
+++ b/fs/bcachefs/journal_reclaim.h
@@ -34,6 +34,7 @@ void bch2_journal_pin_add_if_older(struct journal *,
                                  journal_pin_flush_fn);
 void bch2_journal_pin_flush(struct journal *, struct journal_entry_pin *);
 
+void bch2_journal_do_discards(struct journal *);
 void bch2_journal_reclaim_work(struct work_struct *);
 
 void bch2_journal_flush_pins(struct journal *, u64);
diff --git a/fs/bcachefs/journal_types.h b/fs/bcachefs/journal_types.h
index 09b2d22230335cb33e625f3e0f1ce811bff8443c..c91a21e0780985dcfeaac553027aabbb8e7ca687 100644
--- a/fs/bcachefs/journal_types.h
+++ b/fs/bcachefs/journal_types.h
@@ -210,6 +210,8 @@ struct journal {
 
        /* protects advancing ja->discard_idx: */
        struct mutex            discard_lock;
+       bool                    can_discard;
+
        unsigned                write_delay_ms;
        unsigned                reclaim_delay_ms;