	mutex_init(&clock->lock);
}
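+/*
+ * Reset a bucket's read or write clock to the current clock hand by updating
+ * its alloc key through the btree, replacing the old in-memory-only
+ * bucket_io_clock_reset() helper:
+ */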
+int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
+			      size_t bucket_nr, int rw)
+{
+	struct bch_fs *c = trans->c;
+	struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
+	struct btree_iter *iter;
+	struct bucket *g;
+	struct bkey_i_alloc *a;
+	struct bkey_alloc_unpacked u;
+	u16 *time;
+	int ret = 0;
+
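+	/*
+	 * Cached btree iterator; NOFILL because we don't need to read the
+	 * existing alloc key - the unpacked key is built from the in-memory
+	 * bucket below:
+	 */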
+	iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, POS(dev, bucket_nr),
+				   BTREE_ITER_CACHED|
+				   BTREE_ITER_CACHED_NOFILL|
+				   BTREE_ITER_INTENT);
+	ret = bch2_btree_iter_traverse(iter);
+	if (ret)
+		goto out;
+
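+	/* BKEY_ALLOC_U64s_MAX is in units of u64s, kmalloc wants bytes: */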
+	a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
+	ret = PTR_ERR_OR_ZERO(a);
+	if (ret)
+		goto out;
+
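+	/* snapshot the in-memory bucket state under mark_lock: */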
+	percpu_down_read(&c->mark_lock);
+	g = bucket(ca, bucket_nr);
+	u = alloc_mem_to_key(g, READ_ONCE(g->mark));
+	percpu_up_read(&c->mark_lock);
+
+	bkey_alloc_init(&a->k_i);
+	a->k.p = iter->pos;
+
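+	/* nothing to do if the clock hand hasn't moved since the last reset: */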
+	time = rw == READ ? &u.read_time : &u.write_time;
+	if (*time == c->bucket_clock[rw].hand)
+		goto out;
+
+	*time = c->bucket_clock[rw].hand;
+
+	bch2_alloc_pack(a, u);
+
+	ret = bch2_trans_update(trans, iter, &a->k_i, 0) ?:
+	      bch2_trans_commit(trans, NULL, NULL, 0);
+out:
+	bch2_trans_iter_put(trans, iter);
+	return ret;
+}
+
/* Background allocator thread: */
/*
void bch2_alloc_pack(struct bkey_i_alloc *,
		     const struct bkey_alloc_unpacked);
+int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
+
static inline struct bkey_alloc_unpacked
alloc_mem_to_key(struct bucket *g, struct bucket_mark m)
{
		.dev	= ca->dev_idx,
	};
-	bucket_io_clock_reset(c, ca, bucket, READ);
-	bucket_io_clock_reset(c, ca, bucket, WRITE);
	spin_unlock(&ob->lock);
	if (c->blocked_allocate_open_bucket) {
	return __bucket(ca, b, false);
}
-static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
-					 size_t b, int rw)
-{
-	bucket(ca, b)->io_time[rw] = c->bucket_clock[rw].hand;
-}
-
static inline u16 bucket_last_io(struct bch_fs *c, struct bucket *g, int rw)
{
	return c->bucket_clock[rw].hand - g->io_time[rw];
		if (bkey_extent_is_allocation(k.k))
			bch2_add_page_sectors(&rbio->bio, k);
-		bch2_read_extent(c, rbio, k, offset_into_extent, flags);
+		bch2_read_extent(trans, rbio, k, offset_into_extent, flags);
		if (flags & BCH_READ_LAST_FRAGMENT)
			break;
*/
#include "bcachefs.h"
+#include "alloc_background.h"
#include "alloc_foreground.h"
#include "bkey_on_stack.h"
#include "bset.h"
		goto out;
	}
-	ret = __bch2_read_extent(c, rbio, bvec_iter, k, 0, failed, flags);
+	ret = __bch2_read_extent(&trans, rbio, bvec_iter, k, 0, failed, flags);
	if (ret == READ_RETRY)
		goto retry;
	if (ret)
		bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
		swap(bvec_iter.bi_size, bytes);
-		ret = __bch2_read_extent(c, rbio, bvec_iter, k,
+		ret = __bch2_read_extent(&trans, rbio, bvec_iter, k,
					 offset_into_extent, failed, flags);
		switch (ret) {
		case READ_RETRY:
	return ret;
}
-int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
+int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
		       struct bvec_iter iter, struct bkey_s_c k,
		       unsigned offset_into_extent,
		       struct bch_io_failures *failed, unsigned flags)
{
+	struct bch_fs *c = trans->c;
	struct extent_ptr_decoded pick;
	struct bch_read_bio *rbio = NULL;
	struct bch_dev *ca;
	bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
-	rcu_read_lock();
-	bucket_io_clock_reset(c, ca, PTR_BUCKET_NR(ca, &pick.ptr), READ);
-	rcu_read_unlock();
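+	/*
+	 * read_time is what the allocator uses to invalidate cached data in
+	 * LRU order, so only cached pointers need their clock reset here:
+	 */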
+	if (pick.ptr.cached)
+		bch2_bucket_io_time_reset(trans, pick.ptr.dev,
+					  PTR_BUCKET_NR(ca, &pick.ptr), READ);
	if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
		bio_inc_remaining(&orig->bio);
		if (rbio->bio.bi_iter.bi_size == bytes)
			flags |= BCH_READ_LAST_FRAGMENT;
-		bch2_read_extent(c, rbio, k, offset_into_extent, flags);
+		bch2_read_extent(&trans, rbio, k, offset_into_extent, flags);
		if (flags & BCH_READ_LAST_FRAGMENT)
			break;
	BCH_READ_IN_RETRY	= 1 << 7,
};
-int __bch2_read_extent(struct bch_fs *, struct bch_read_bio *,
+int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
		       struct bvec_iter, struct bkey_s_c, unsigned,
		       struct bch_io_failures *, unsigned);
-static inline void bch2_read_extent(struct bch_fs *c,
+static inline void bch2_read_extent(struct btree_trans *trans,
				    struct bch_read_bio *rbio,
				    struct bkey_s_c k,
				    unsigned offset_into_extent,
				    unsigned flags)
{
-	__bch2_read_extent(c, rbio, rbio->bio.bi_iter, k,
+	__bch2_read_extent(trans, rbio, rbio->bio.bi_iter, k,
			   offset_into_extent, NULL, flags);
}
		atomic_read(&ctxt->write_sectors) != sectors_pending);
}
-static int bch2_move_extent(struct bch_fs *c,
+static int bch2_move_extent(struct btree_trans *trans,
			    struct moving_context *ctxt,
			    struct write_point_specifier wp,
			    struct bch_io_opts io_opts,
			    enum btree_id btree_id,
			    struct bkey_s_c k,
			    enum data_cmd data_cmd,
			    struct data_opts data_opts)
{
+	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct moving_io *io;
	const union bch_extent_entry *entry;
	 * ctxt when doing wakeup
	 */
	closure_get(&ctxt->cl);
-	bch2_read_extent(c, &io->rbio, k, 0,
+	bch2_read_extent(trans, &io->rbio, k, 0,
			 BCH_READ_NODECODE|
			 BCH_READ_LAST_FRAGMENT);
	return 0;
		k = bkey_i_to_s_c(sk.k);
		bch2_trans_unlock(&trans);
-		ret2 = bch2_move_extent(c, ctxt, wp, io_opts, btree_id, k,
+		ret2 = bch2_move_extent(&trans, ctxt, wp, io_opts, btree_id, k,
					data_cmd, data_opts);
		if (ret2) {
			if (ret2 == -ENOMEM) {