.key_invalid = empty_val_key_invalid, \
}
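+/*
+ * inline data stores the file data in the bkey value itself, so any
+ * value is valid:
+ */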
+static const char *key_type_inline_data_invalid(const struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ return NULL;
+}
+
+static void key_type_inline_data_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ pr_buf(out, "(%zu bytes)", bkey_val_bytes(k.k));
+}
+
+static const struct bkey_ops bch2_bkey_ops_inline_data = {
+ .key_invalid = key_type_inline_data_invalid,
+ .val_to_text = key_type_inline_data_to_text,
+};
+
static const struct bkey_ops bch2_bkey_ops[] = {
#define x(name, nr) [KEY_TYPE_##name] = bch2_bkey_ops_##name,
BCH_BKEY_TYPES()
if (k.k->u64s < BKEY_U64s)
return "u64s too small";
- if ((btree_node_type_is_extents(type) ||
- type == BKEY_TYPE_BTREE) &&
- bkey_val_u64s(k.k) > BKEY_EXTENT_VAL_U64s_MAX)
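+ /* extent values may now hold inline data, so only btree pointer keys get the fixed size cap: */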
+ if (type == BKEY_TYPE_BTREE &&
+ bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
return "value too big";
if (btree_node_type_is_extents(type)) {
for (src = keys->keys; src != keys->top; src = n) {
n = bkey_next(src);
- bkey_copy(dst, src);
- bch2_bkey_drop_ptrs(bkey_i_to_s(dst), ptr,
- test_bit(ptr->dev, op->failed.d));
- if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(dst))) {
- ret = -EIO;
- goto err;
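+ /* only keys that point at data on devices can have failed pointers to drop: */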
+ if (bkey_extent_is_direct_data(&src->k)) {
+ bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
+ test_bit(ptr->dev, op->failed.d));
+ if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src))) {
+ ret = -EIO;
+ goto err;
+ }
}
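+ /* if an earlier key shrank when its pointers were dropped, compact the keylist: */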
+ if (dst != src)
+ memmove_u64s_down(dst, src, src->u64s);
dst = bkey_next(dst);
}
bio->bi_end_io = bch2_write_endio;
bio->bi_private = &op->cl;
- bio->bi_opf = REQ_OP_WRITE;
+ bio->bi_opf |= REQ_OP_WRITE;
if (!skip_put)
closure_get(bio->bi_private);
goto again;
}
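+/*
+ * Write path for data small enough to live in the btree: instead of
+ * allocating extents on a device, copy it into a KEY_TYPE_inline_data
+ * key to be inserted into the extents btree.
+ */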
+static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
+{
+ struct closure *cl = &op->cl;
+ struct bio *bio = &op->wbio.bio;
+ struct bvec_iter iter;
+ struct bkey_i_inline_data *id;
+ unsigned sectors;
+ int ret;
+
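+ /* make room in the keylist: key header plus the data rounded up to u64s: */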
+ ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
+ ARRAY_SIZE(op->inline_keys),
+ BKEY_U64s + DIV_ROUND_UP(data_len, 8));
+ if (ret) {
+ op->error = ret;
+ goto err;
+ }
+
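+ /* bkeys are indexed by the end of the range they cover: */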
+ sectors = bio_sectors(bio);
+ op->pos.offset += sectors;
+
+ id = bkey_inline_data_init(op->insert_keys.top);
+ id->k.p = op->pos;
+ id->k.version = op->version;
+ id->k.size = sectors;
+
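+ /* copy the first data_len bytes of the bio's payload into the value: */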
+ iter = bio->bi_iter;
+ iter.bi_size = data_len;
+ memcpy_from_bio(id->v.data, bio, iter);
+
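+ /* zero pad the value to a whole number of u64s: */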
+ while (data_len & 7)
+ id->v.data[data_len++] = '\0';
+ set_bkey_val_bytes(&id->k, data_len);
+ bch2_keylist_push(&op->insert_keys);
+
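+ /* nothing to write to devices; proceed straight to the index update: */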
+ op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
+ continue_at_nobarrier(cl, bch2_write_index, NULL);
+ return;
+err:
+ bch2_write_done(&op->cl);
+}
+
/**
* bch_write - handle a write to a cache device or flash only volume
*
struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
struct bio *bio = &op->wbio.bio;
struct bch_fs *c = op->c;
+ unsigned data_len;
BUG_ON(!op->nr_replicas);
BUG_ON(!op->write_point.v);
BUG_ON(!bkey_cmp(op->pos, POS_MAX));
+ op->start_time = local_clock();
+ bch2_keylist_init(&op->insert_keys, op->inline_keys);
+ wbio_init(bio)->put_bio = false;
+
if (bio_sectors(bio) & (c->opts.block_size - 1)) {
__bcache_io_error(c, "misaligned write");
op->error = -EIO;
goto err;
}
- op->start_time = local_clock();
-
- bch2_keylist_init(&op->insert_keys, op->inline_keys);
- wbio_init(bio)->put_bio = false;
-
if (c->opts.nochanges ||
!percpu_ref_tryget(&c->writes)) {
__bcache_io_error(c, "read only");
bch2_increment_clock(c, bio_sectors(bio), WRITE);
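+ /*
+ * Store small writes (at most half a block, capped at 1k) inline in
+ * the key itself; only bytes up to new_i_size count towards data_len:
+ */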
+ data_len = min_t(u64, bio->bi_iter.bi_size,
+ op->new_i_size - (op->pos.offset << 9));
+
+ if (data_len <= min(block_bytes(c) / 2, 1024U)) {
+ bch2_write_data_inline(op, data_len);
+ return;
+ }
+
continue_at_nobarrier(cl, __bch2_write, NULL);
return;
err:
struct bpos pos = bkey_start_pos(k.k);
int pick_ret;
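+ /*
+ * inline data needs no device read: copy it straight out of the key
+ * and zero-fill whatever part of the request it doesn't cover:
+ */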
+ if (k.k->type == KEY_TYPE_inline_data) {
+ struct bkey_s_c_inline_data d = bkey_s_c_to_inline_data(k);
+ unsigned bytes = min_t(unsigned, iter.bi_size,
+ bkey_val_bytes(d.k));
+
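+ /* temporarily clamp the iter to the inline bytes for the copy, then restore it: */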
+ swap(iter.bi_size, bytes);
+ memcpy_to_bio(&orig->bio, iter, d.v->data);
+ swap(iter.bi_size, bytes);
+ bio_advance_iter(&orig->bio, &iter, bytes);
+ zero_fill_bio_iter(&orig->bio, iter);
+ goto out_read_done;
+ }
+
pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
/* hole or reservation - just zero fill: */