!(ret = btree_iter_err(k))) {
if (!bkey_extent_is_data(k.k) ||
!bch2_extent_has_device(bkey_s_c_to_extent(k), dev_idx)) {
- ret = bch2_mark_bkey_replicas(c, BCH_DATA_USER, k);
+ ret = bch2_mark_bkey_replicas(c, BKEY_TYPE_EXTENTS, k);
if (ret)
break;
bch2_btree_iter_next(&iter);
*/
bch2_extent_normalize(c, e.s);
- ret = bch2_mark_bkey_replicas(c, BCH_DATA_USER,
+ ret = bch2_mark_bkey_replicas(c, BKEY_TYPE_EXTENTS,
bkey_i_to_s_c(&tmp.key));
if (ret)
break;
*/
bch2_btree_iter_downgrade(&iter);
- ret = bch2_mark_bkey_replicas(c, BCH_DATA_BTREE,
+ ret = bch2_mark_bkey_replicas(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(&b->key));
if (ret)
goto err;
goto next;
}
- ret = bch2_mark_bkey_replicas(c, BCH_DATA_USER,
+ ret = bch2_mark_bkey_replicas(c, BKEY_TYPE_EXTENTS,
extent_i_to_s_c(insert).s_c);
if (ret)
break;
for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
BTREE_ITER_PREFETCH, k) {
- ret = bch2_mark_bkey_replicas(c, BCH_DATA_USER, k);
+ ret = bch2_mark_bkey_replicas(c, BKEY_TYPE_EXTENTS, k);
if (ret)
break;
}
for (id = 0; id < BTREE_ID_NR; id++) {
for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
- ret = bch2_mark_bkey_replicas(c, BCH_DATA_BTREE,
+ ret = bch2_mark_bkey_replicas(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(&b->key));
bch2_btree_iter_cond_resched(&iter);
return out - buf;
}
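+/*
+ * Append the device index of each dirty (non-cached) pointer in an extent to
+ * the replicas entry being built. The caller initializes r->nr_devs and is
+ * responsible for sorting the finished entry.
+ */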
+static void extent_to_replicas(struct bkey_s_c k,
+ struct bch_replicas_entry *r)
+{
+ if (bkey_extent_is_data(k.k)) {
+ struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+
+ extent_for_each_ptr_decode(e, p, entry)
+ if (!p.ptr.cached)
+ r->devs[r->nr_devs++] = p.ptr.dev;
+ }
+}
+
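+/*
+ * Build a replicas entry from a bkey: btree node pointers are accounted as
+ * BCH_DATA_BTREE, extent pointers as BCH_DATA_USER; other key types produce
+ * an empty entry. Sorting puts the device list in canonical order for
+ * lookups in the replicas table.
+ */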
+static void bkey_to_replicas(enum bkey_type type,
+ struct bkey_s_c k,
+ struct bch_replicas_entry *e)
+{
+ e->nr_devs = 0;
+
+ switch (type) {
+ case BKEY_TYPE_BTREE:
+ e->data_type = BCH_DATA_BTREE;
+ extent_to_replicas(k, e);
+ break;
+ case BKEY_TYPE_EXTENTS:
+ e->data_type = BCH_DATA_USER;
+ extent_to_replicas(k, e);
+ break;
+ default:
+ break;
+ }
+
+ replicas_entry_sort(e);
+}
+
static inline void devlist_to_replicas(struct bch_devs_list devs,
enum bch_data_type data_type,
struct bch_replicas_entry *e)
return ret;
}
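+/*
+ * Fast path for marking a replicas entry: check under RCU whether it is
+ * already present in the current table (and in the in-progress GC table, if
+ * any); only take bch2_mark_replicas_slowpath(), which adds the missing
+ * entry, when it isn't.
+ */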
+static int __bch2_mark_replicas(struct bch_fs *c,
+ struct bch_replicas_entry *devs)
+{
+ struct bch_replicas_cpu *r, *gc_r;
+ bool marked;
+
+ rcu_read_lock();
+ r = rcu_dereference(c->replicas);
+ gc_r = rcu_dereference(c->replicas_gc);
+ marked = replicas_has_entry(r, devs) &&
+		(likely(!gc_r) || replicas_has_entry(gc_r, devs));
+ rcu_read_unlock();
+
+ return likely(marked) ? 0
+ : bch2_mark_replicas_slowpath(c, devs);
+}
+
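+/*
+ * Mark a raw device list: build the replicas entry from @devs and defer to
+ * the common __bch2_mark_replicas() helper shared with the bkey path.
+ */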
int bch2_mark_replicas(struct bch_fs *c,
enum bch_data_type data_type,
struct bch_devs_list devs)
{
struct bch_replicas_entry_padded search;
- struct bch_replicas_cpu *r, *gc_r;
- bool marked;
if (!devs.nr)
return 0;
devlist_to_replicas(devs, data_type, &search.e);
- rcu_read_lock();
- r = rcu_dereference(c->replicas);
- gc_r = rcu_dereference(c->replicas_gc);
- marked = replicas_has_entry(r, &search.e) &&
- (!likely(gc_r) || replicas_has_entry(gc_r, &search.e));
- rcu_read_unlock();
-
- return likely(marked) ? 0
- : bch2_mark_replicas_slowpath(c, &search.e);
+ return __bch2_mark_replicas(c, &search.e);
}
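+/*
+ * Cached pointers don't contribute to the key's replicas entry: they are
+ * marked individually as BCH_DATA_CACHED, while the dirty pointers are
+ * marked as a single entry derived from the key itself.
+ */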
int bch2_mark_bkey_replicas(struct bch_fs *c,
- enum bch_data_type data_type,
+ enum bkey_type type,
struct bkey_s_c k)
{
- struct bch_devs_list cached = bch2_bkey_cached_devs(k);
- unsigned i;
+ struct bch_replicas_entry_padded search;
int ret;
- for (i = 0; i < cached.nr; i++)
- if ((ret = bch2_mark_replicas(c, BCH_DATA_CACHED,
- bch2_dev_list_single(cached.devs[i]))))
- return ret;
+ if (type == BKEY_TYPE_EXTENTS) {
+ struct bch_devs_list cached = bch2_bkey_cached_devs(k);
+ unsigned i;
+
+ for (i = 0; i < cached.nr; i++)
+ if ((ret = bch2_mark_replicas(c, BCH_DATA_CACHED,
+ bch2_dev_list_single(cached.devs[i]))))
+ return ret;
+ }
+
+ bkey_to_replicas(type, k, &search.e);
- return bch2_mark_replicas(c, data_type, bch2_bkey_dirty_devs(k));
+ return search.e.nr_devs
+ ? __bch2_mark_replicas(c, &search.e)
+ : 0;
}
int bch2_replicas_gc_end(struct bch_fs *c, int ret)
}
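+/*
+ * Read-only counterpart of bch2_mark_bkey_replicas(): returns true only if
+ * every entry the key would require is already marked, without adding
+ * anything.
+ */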
bool bch2_bkey_replicas_marked(struct bch_fs *c,
- enum bch_data_type data_type,
+ enum bkey_type type,
struct bkey_s_c k)
{
- struct bch_devs_list cached = bch2_bkey_cached_devs(k);
- unsigned i;
+ struct bch_replicas_entry_padded search;
+ bool ret;
+
+ if (type == BKEY_TYPE_EXTENTS) {
+ struct bch_devs_list cached = bch2_bkey_cached_devs(k);
+ unsigned i;
+
+ for (i = 0; i < cached.nr; i++)
+ if (!bch2_replicas_marked(c, BCH_DATA_CACHED,
+ bch2_dev_list_single(cached.devs[i])))
+ return false;
+ }
+
+ bkey_to_replicas(type, k, &search.e);
- for (i = 0; i < cached.nr; i++)
- if (!bch2_replicas_marked(c, BCH_DATA_CACHED,
- bch2_dev_list_single(cached.devs[i])))
- return false;
+ if (!search.e.nr_devs)
+ return true;
+
+ rcu_read_lock();
+ ret = replicas_has_entry(rcu_dereference(c->replicas), &search.e);
+ rcu_read_unlock();
- return bch2_replicas_marked(c, data_type, bch2_bkey_dirty_devs(k));
+ return ret;
}
struct replicas_status __bch2_replicas_status(struct bch_fs *c,