return ret;
}
+ if (new_a->data_type == BCH_DATA_cached &&
+ !new_a->io_time[READ])
+ new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
+
old_lru = alloc_lru_idx(old_a);
new_lru = alloc_lru_idx(*new_a);
if (ret)
return ret;
- if (new_lru && new_a->io_time[READ] != new_lru)
+ if (new_a->data_type == BCH_DATA_cached)
new_a->io_time[READ] = new_lru;
}
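[Context, not part of the patch: alloc_lru_idx() is assumed here to map a bucket's alloc info to its position in the read LRU, roughly:

static inline u64 alloc_lru_idx(struct bch_alloc_v4 a)
{
	/* only cached buckets live in the read LRU; 0 means "no LRU entry" */
	return a.data_type == BCH_DATA_cached ? a.io_time[READ] : 0;
}

which would explain why the updated condition writes new_lru back into io_time[READ] only for BCH_DATA_cached buckets.]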
if (!a.io_time[READ])
a.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
- ret = bch2_lru_change(trans,
- alloc_k.k->p.inode,
- alloc_k.k->p.offset,
- 0, &a.io_time[READ]);
+ ret = bch2_lru_set(trans,
+ alloc_k.k->p.inode,
+ alloc_k.k->p.offset,
+ &a.io_time[READ]);
if (ret)
goto err;
a->v = new;
+ /*
+ * The trigger normally makes sure this is set, but we're not running
+ * triggers:
+ */
+ if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ])
+ a->v.io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
+
ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_NORUN);
fsck_err:
return ret;
pr_buf(out, "idx %llu", le64_to_cpu(lru->idx));
}
-static int lru_delete(struct btree_trans *trans, u64 id, u64 idx, u64 time)
+int bch2_lru_delete(struct btree_trans *trans, u64 id, u64 idx, u64 time)
{
struct btree_iter iter;
struct bkey_s_c k;
return ret;
}
-static int lru_set(struct btree_trans *trans, u64 lru_id, u64 idx, u64 *time)
+int bch2_lru_set(struct btree_trans *trans, u64 lru_id, u64 idx, u64 *time)
{
struct btree_iter iter;
struct bkey_s_c k;
if (old_time == *new_time)
return 0;
- return lru_delete(trans, id, idx, old_time) ?:
- lru_set(trans, id, idx, new_time);
+ return bch2_lru_delete(trans, id, idx, old_time) ?:
+ bch2_lru_set(trans, id, idx, new_time);
}
static int bch2_check_lru_key(struct btree_trans *trans,
.val_to_text = bch2_lru_to_text, \
}
+int bch2_lru_delete(struct btree_trans *, u64, u64, u64);
+int bch2_lru_set(struct btree_trans *, u64, u64, u64 *);
int bch2_lru_change(struct btree_trans *, u64, u64, u64, u64 *);
int bch2_check_lrus(struct bch_fs *, bool);
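[Usage note, not part of the patch: the *time argument to bch2_lru_set() appears to be in/out - if the requested slot in the LRU btree is already occupied, the entry is placed at a later time and the position actually used is written back through *time. That would be why the callers above pass &io_time[READ] (or assign new_lru back) to keep the bucket's io_time in sync with its LRU entry. A hypothetical caller, with lru_id, bucket and a as illustrative names only:

	u64 time = a->io_time[READ];

	ret = bch2_lru_set(trans, lru_id, bucket.offset, &time);
	if (!ret)
		a->io_time[READ] = time;
]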