bool bch2_btree_insert_key_cached(struct btree_trans *trans,
                                  unsigned flags,
-                                 struct btree_path *path,
-                                 struct bkey_i *insert)
+                                 struct btree_insert_entry *insert_entry)
 {
        struct bch_fs *c = trans->c;
-       struct bkey_cached *ck = (void *) path->l[0].b;
+       struct bkey_cached *ck = (void *) insert_entry->path->l[0].b;
+       struct bkey_i *insert = insert_entry->k;
        bool kick_reclaim = false;
 
       BUG_ON(insert->k.u64s > ck->u64s);

       bkey_copy(ck->k, insert);

       if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
               set_bit(BKEY_CACHED_DIRTY, &ck->flags);
               atomic_long_inc(&c->btree_key_cache.nr_dirty);

               if (bch2_nr_btree_keys_need_flush(c))
                       kick_reclaim = true;
       }
 
+       /*
+        * To minimize lock contention, we only add the journal pin here and
+        * defer pin updates to the flush callback via ->seq. Be careful not to
+        * update ->seq on nojournal commits because we don't want to update the
+        * pin to a seq that doesn't include journal updates on disk. Otherwise
+        * we risk losing the update after a crash.
+        *
+        * The only exception is if the pin is not active in the first place. We
+        * have to add the pin because journal reclaim drives key cache
+        * flushing. The flush callback will not proceed unless ->seq matches
+        * the latest pin, so make sure it starts with a consistent value.
+        */
+       if (!(insert_entry->flags & BTREE_UPDATE_NOJOURNAL) ||
+           !journal_pin_active(&ck->journal))
+               ck->seq = trans->journal_res.seq;
        bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
                             &ck->journal, bch2_btree_key_cache_journal_flush);
-       ck->seq = trans->journal_res.seq;
 
        if (kick_reclaim)
                journal_reclaim_kick(&c->journal);
 
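For context, the flush side that the new comment refers to is bch2_btree_key_cache_journal_flush(), the callback passed to bch2_journal_pin_add() above. Below is a minimal sketch of the ->seq check under the assumption that only the seq-matching logic matters here; the real callback also handles locking, SRCU, and transaction setup, and key_cache_journal_flush_sketch() is an illustrative name, not the actual function:

/*
 * Sketch of the journal reclaim flush path (simplified; the real code is
 * bch2_btree_key_cache_journal_flush() in btree_key_cache.c).
 */
static int key_cache_journal_flush_sketch(struct journal *j,
                                struct journal_entry_pin *pin, u64 seq)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bkey_cached *ck = container_of(pin, struct bkey_cached, journal);

        /*
         * The key was redirtied at a newer seq after this pin was added:
         * instead of flushing now, advance the pin to ck->seq so the journal
         * can reclaim everything older. This is why ck->seq must never point
         * at a nojournal commit's seq: the flush would be deferred past an
         * update that was never journalled, and a crash would lose it.
         */
        if (ck->seq != seq) {
                bch2_journal_pin_update(&c->journal, ck->seq, &ck->journal,
                                        bch2_btree_key_cache_journal_flush);
                return 0;
        }

        /* ck->seq matches the latest pin: flush the cached key to the btree */
        return 0;       /* actual btree flush elided in this sketch */
}
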
 int bch2_btree_path_traverse_cached(struct btree_trans *, struct btree_path *,
                                     unsigned);
 
 bool bch2_btree_insert_key_cached(struct btree_trans *, unsigned,
-                       struct btree_path *, struct bkey_i *);
+                       struct btree_insert_entry *);
 int bch2_btree_key_cache_flush(struct btree_trans *,
                               enum btree_id, struct bpos);
 void bch2_btree_key_cache_drop(struct btree_trans *,
                                struct btree_path *);
 
                if (!i->cached)
                        btree_insert_key_leaf(trans, i);
                else if (!i->key_cache_already_flushed)
-                       bch2_btree_insert_key_cached(trans, flags, i->path, i->k);
+                       bch2_btree_insert_key_cached(trans, flags, i);
                else {
                        bch2_btree_key_cache_drop(trans, i->path);
                        btree_path_set_dirty(i->path, BTREE_ITER_NEED_TRAVERSE);
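
For reference, BTREE_UPDATE_NOJOURNAL is carried per update entry, which is why the function now takes the whole btree_insert_entry instead of just the path and key. A hypothetical caller that queues such an update could look like the following; update_nojournal_example() is an illustrative name and error handling is elided:

/*
 * Queue a nojournal update: the flag is stored in the insert entry's
 * ->flags, which bch2_btree_insert_key_cached() now checks before
 * bumping ck->seq.
 */
static int update_nojournal_example(struct btree_trans *trans,
                                    struct btree_iter *iter,
                                    struct bkey_i *k)
{
        return bch2_trans_update(trans, iter, k, BTREE_UPDATE_NOJOURNAL);
}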