x(reflink, 6) \
x(new_siphash, 7) \
x(inline_data, 8) \
- x(new_extent_overwrite, 9)
+ x(new_extent_overwrite, 9) \
+ x(incompressible, 10)
enum bch_sb_feature {
#define x(f, n) BCH_FEATURE_##f,
};
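The feature list above is an x-macro: each x(name, nr) entry expands once per definition of x, so the whole change is one new entry plus the continuation backslash the old last entry now needs. A minimal standalone sketch of the expansion, with abridged, hypothetical names; note the real header omits the "= n" and takes each value from list position, it is pinned here only so the fragment compiles on its own:

    #include <stdio.h>

    #define SB_FEATURES() \
        x(new_extent_overwrite, 9) \
        x(incompressible, 10)

    enum sb_feature {
    #define x(f, n) FEATURE_##f = n,
        SB_FEATURES()
    #undef x
    };

    int main(void)
    {
        printf("%d\n", FEATURE_incompressible); /* prints 10 */
        return 0;
    }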
#define BCH_COMPRESSION_TYPES() \
- x(none, 0) \
- x(lz4_old, 1) \
- x(gzip, 2) \
- x(lz4, 3) \
- x(zstd, 4)
+ x(none, 0) \
+ x(lz4_old, 1) \
+ x(gzip, 2) \
+ x(lz4, 3) \
+ x(zstd, 4) \
+ x(incompressible, 5)
enum bch_compression_type {
#define x(t, n) BCH_COMPRESSION_TYPE_##t,
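Same x-macro pattern for the compression types: BCH_COMPRESSION_TYPE_incompressible lands at value 5 by position. As the rest of the patch shows, it is a per-extent on-disk marker meaning compression was attempted and did not pay off; everything that actually moves or decodes data treats it like none.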
BUG_ON(len_a + len_b > bio_sectors(bio));
BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));
- BUG_ON(crc_old.compression_type);
+ BUG_ON(crc_is_compressed(crc_old));
BUG_ON(bch2_csum_type_is_encryption(crc_old.csum_type) !=
bch2_csum_type_is_encryption(new_csum_type));
if (i->crc)
*i->crc = (struct bch_extent_crc_unpacked) {
.csum_type = i->csum_type,
+ .compression_type = crc_old.compression_type,
.compressed_size = i->len,
.uncompressed_size = i->len,
.offset = 0,
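Two related fixes in bch2_rechecksum_bio(): the assertion is relaxed from "any nonzero compression_type" to crc_is_compressed(), so incompressible extents may be rechecksummed like plain ones, and the split crc entries now carry the old compression_type forward, without which the incompressible mark would be silently dropped here.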
static inline struct nonce extent_nonce(struct bversion version,
struct bch_extent_crc_unpacked crc)
{
- unsigned size = crc.compression_type ? crc.uncompressed_size : 0;
+ unsigned compression_type = crc_is_compressed(crc)
+ ? crc.compression_type
+ : 0;
+ unsigned size = compression_type ? crc.uncompressed_size : 0;
struct nonce nonce = (struct nonce) {{
[0] = cpu_to_le32(size << 22),
[1] = cpu_to_le32(version.lo),
[2] = cpu_to_le32(version.lo >> 32),
[3] = cpu_to_le32(version.hi|
- (crc.compression_type << 24))^BCH_NONCE_EXTENT,
+ (compression_type << 24))^BCH_NONCE_EXTENT,
}};
return nonce_add(nonce, crc.nonce << 9);
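extent_nonce() is the delicate spot: the nonce feeds en/decryption of data already on disk, so an incompressible extent must produce exactly the nonce an uncompressed one would. Folding the type to 0 before it enters words [0] and [3] does that, while the real codecs keep their old nonces bit for bit.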
bio_unmap_or_unbounce(c, dst_data);
return compression_type;
err:
- compression_type = 0;
+ compression_type = BCH_COMPRESSION_TYPE_incompressible;
goto out;
}
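This error path is where the new type is minted: when compression fails here (the data did not shrink enough, or the compressor errored), the recorded type is now incompressible rather than none, distinguishing "tried and not worth it" from "never tried".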
if (!bch2_checksum_mergeable(crc_l.csum_type))
return BCH_MERGE_NOMERGE;
- if (crc_l.compression_type)
+ if (crc_is_compressed(crc_l))
return BCH_MERGE_NOMERGE;
if (crc_l.csum_type &&
static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
struct bch_extent_crc_unpacked n)
{
- return !u.compression_type &&
+ return !crc_is_compressed(u) &&
u.csum_type &&
u.uncompressed_size > u.live_size &&
bch2_csum_type_is_encryption(u.csum_type) ==
/* Find a checksum entry that covers only live data: */
if (!n.csum_type) {
bkey_for_each_crc(&k->k, ptrs, u, i)
- if (!u.compression_type &&
+ if (!crc_is_compressed(u) &&
u.csum_type &&
u.live_size == u.uncompressed_size) {
n = u;
return false;
}
found:
- BUG_ON(n.compression_type);
+ BUG_ON(crc_is_compressed(n));
BUG_ON(n.offset);
BUG_ON(n.live_size != k->k.size);
struct extent_ptr_decoded p;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- ret += !p.ptr.cached &&
- p.crc.compression_type == BCH_COMPRESSION_TYPE_none;
+ ret += !p.ptr.cached && !crc_is_compressed(p.crc);
}
return ret;
unsigned ret = 0;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- if (!p.ptr.cached &&
- p.crc.compression_type != BCH_COMPRESSION_TYPE_none)
+ if (!p.ptr.cached && crc_is_compressed(p.crc))
ret += p.crc.compressed_size;
return ret;
}
+bool bch2_bkey_is_incompressible(struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct bch_extent_crc_unpacked crc;
+
+ bkey_for_each_crc(k.k, ptrs, crc, entry)
+ if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
+ return true;
+ return false;
+}
+
bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
unsigned nr_replicas)
{
#undef common_fields
}
+static inline bool crc_is_compressed(struct bch_extent_crc_unpacked crc)
+{
+ return (crc.compression_type != BCH_COMPRESSION_TYPE_none &&
+ crc.compression_type != BCH_COMPRESSION_TYPE_incompressible);
+}
+
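crc_is_compressed() is the predicate most of this patch converts call sites to: only the real codecs count as compressed; none and incompressible are both plain data on disk. A standalone sketch, with a stand-in struct carrying only the field the predicate reads, pinning down the truth table:

    #include <assert.h>
    #include <stdbool.h>

    enum bch_compression_type {
        BCH_COMPRESSION_TYPE_none,
        BCH_COMPRESSION_TYPE_lz4_old,
        BCH_COMPRESSION_TYPE_gzip,
        BCH_COMPRESSION_TYPE_lz4,
        BCH_COMPRESSION_TYPE_zstd,
        BCH_COMPRESSION_TYPE_incompressible,
    };

    /* stand-in: the real struct also has sizes, csum, nonce, ... */
    struct bch_extent_crc_unpacked {
        enum bch_compression_type compression_type;
    };

    static inline bool crc_is_compressed(struct bch_extent_crc_unpacked crc)
    {
        return crc.compression_type != BCH_COMPRESSION_TYPE_none &&
               crc.compression_type != BCH_COMPRESSION_TYPE_incompressible;
    }

    int main(void)
    {
        struct bch_extent_crc_unpacked crc = { BCH_COMPRESSION_TYPE_none };
        assert(!crc_is_compressed(crc));   /* none: not compressed */

        crc.compression_type = BCH_COMPRESSION_TYPE_incompressible;
        assert(!crc_is_compressed(crc));   /* marker only, data is raw */

        crc.compression_type = BCH_COMPRESSION_TYPE_zstd;
        assert(crc_is_compressed(crc));    /* a real codec */
        return 0;
    }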
/* bkey_ptrs: generically over any key type that has ptrs */
struct bkey_ptrs_c {
unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
+bool bch2_bkey_is_incompressible(struct bkey_s_c);
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);
bool bch2_check_range_allocated(struct bch_fs *, struct bpos, u64, unsigned);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
* particularly want to plumb io_opts all the way through the btree
* update stack right now
*/
- for_each_keylist_key(keys, k)
+ for_each_keylist_key(keys, k) {
bch2_rebalance_add_key(c, bkey_i_to_s_c(k), &op->opts);
+
+ if (bch2_bkey_is_incompressible(bkey_i_to_s_c(k)))
+ bch2_check_set_feature(op->c, BCH_FEATURE_incompressible);
+ }
+
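Once the index update lands a key carrying an incompressible entry, bch2_check_set_feature() flips BCH_FEATURE_incompressible in the superblock, so the format bit is advertised only once the type actually exists on disk, presumably so that older code which does not know the type can refuse the filesystem cleanly.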
if (!bch2_keylist_empty(keys)) {
u64 sectors_start = keylist_sectors(keys);
int ret = op->index_update_fn(op);
/* Can we just write the entire extent as is? */
if (op->crc.uncompressed_size == op->crc.live_size &&
op->crc.compressed_size <= wp->sectors_free &&
- op->crc.compression_type == op->compression_type) {
- if (!op->crc.compression_type &&
+ (op->crc.compression_type == op->compression_type ||
+ op->incompressible)) {
+ if (!crc_is_compressed(op->crc) &&
op->csum_type != op->crc.csum_type &&
bch2_write_rechecksum(c, op, op->csum_type))
return PREP_ENCODED_CHECKSUM_ERR;
* If the data is compressed and we couldn't write the entire extent as
* is, we have to decompress it:
*/
- if (op->crc.compression_type) {
+ if (crc_is_compressed(op->crc)) {
struct bch_csum csum;
if (bch2_write_decrypt(op))
bch2_csum_type_is_encryption(op->crc.csum_type));
BUG_ON(op->compression_type && !bounce);
- crc.compression_type = op->compression_type
- ? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
- op->compression_type)
+ crc.compression_type = op->incompressible
+ ? BCH_COMPRESSION_TYPE_incompressible
+ : op->compression_type
+ ? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
+ op->compression_type)
: 0;
- if (!crc.compression_type) {
+ if (!crc_is_compressed(crc)) {
dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);
}
if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
- !crc.compression_type &&
+ !crc_is_compressed(crc) &&
bch2_csum_type_is_encryption(op->crc.csum_type) ==
bch2_csum_type_is_encryption(op->csum_type)) {
/*
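In the write path proper: an incompressible op bypasses the compressor and stamps the type directly; otherwise a requested codec goes through bch2_bio_compress(), whose failure path now also yields incompressible. Either way, !crc_is_compressed(crc) catches both not-compressed outcomes, so the sizing logic and the BCH_WRITE_DATA_ENCODED rechecksum shortcut treat them identically.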
static struct promote_op *__promote_alloc(struct bch_fs *c,
enum btree_id btree_id,
+ struct bkey_s_c k,
struct bpos pos,
struct extent_ptr_decoded *pick,
struct bch_io_opts opts,
(struct data_opts) {
.target = opts.promote_target
},
- btree_id,
- bkey_s_c_null);
+ btree_id, k);
BUG_ON(ret);
return op;
k.k->type == KEY_TYPE_reflink_v
? BTREE_ID_REFLINK
: BTREE_ID_EXTENTS,
- pos, pick, opts, sectors, rbio);
+ k, pos, pick, opts, sectors, rbio);
if (!promote)
return NULL;
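__promote_alloc() grows a bkey_s_c parameter so the promote path stops passing bkey_s_c_null into the migrate-write setup: bch2_migrate_write_init(), below, needs the key to tell whether the extent being promoted is marked incompressible.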
u64 data_offset = rbio->pos.offset - rbio->pick.crc.offset;
int ret;
- if (rbio->pick.crc.compression_type)
+ if (crc_is_compressed(rbio->pick.crc))
return;
bkey_on_stack_init(&new);
crc.offset += rbio->offset_into_extent;
crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
- if (crc.compression_type != BCH_COMPRESSION_TYPE_none) {
+ if (crc_is_compressed(crc)) {
bch2_encrypt_bio(c, crc.csum_type, nonce, src);
if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
goto decompression_err;
}
if (rbio->narrow_crcs ||
- rbio->pick.crc.compression_type ||
+ crc_is_compressed(rbio->pick.crc) ||
bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
else if (rbio->pick.crc.csum_type)
EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
- if (pick.crc.compression_type != BCH_COMPRESSION_TYPE_none ||
+ if (crc_is_compressed(pick.crc) ||
(pick.crc.csum_type != BCH_CSUM_NONE &&
(bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
(bch2_csum_type_is_encryption(pick.crc.csum_type) &&
&rbio, &bounce, &read_full);
if (!read_full) {
- EBUG_ON(pick.crc.compression_type);
+ EBUG_ON(crc_is_compressed(pick.crc));
EBUG_ON(pick.crc.csum_type &&
(bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
bvec_iter_sectors(iter) != pick.crc.live_size ||
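The read side is mechanical substitution: every test that was effectively "compression_type != none" becomes crc_is_compressed(), so incompressible extents take the uncompressed fast paths (no bounce buffer, no decompression step, narrowable checksums, partial reads allowed).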
unsigned nr_replicas:4;
unsigned nr_replicas_required:4;
unsigned alloc_reserve:4;
+ unsigned incompressible:1;
struct bch_devs_list devs_have;
u16 target;
enum btree_id btree_id,
struct bkey_s_c k)
{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
int ret;
m->btree_id = btree_id;
m->nr_ptrs_reserved = 0;
bch2_write_op_init(&m->op, c, io_opts);
- m->op.compression_type =
- bch2_compression_opt_to_type[io_opts.background_compression ?:
- io_opts.compression];
+
+ if (!bch2_bkey_is_incompressible(k))
+ m->op.compression_type =
+ bch2_compression_opt_to_type[io_opts.background_compression ?:
+ io_opts.compression];
+ else
+ m->op.incompressible = true;
+
m->op.target = data_opts.target;
m->op.write_point = wp;
break;
}
case DATA_REWRITE: {
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
unsigned compressed_sectors = 0;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
if (!p.ptr.cached &&
- p.crc.compression_type != BCH_COMPRESSION_TYPE_none &&
+ crc_is_compressed(p.crc) &&
bch2_dev_in_target(c, p.ptr.dev, data_opts.target))
compressed_sectors += p.crc.compressed_size;
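In bch2_migrate_write_init(), the ptrs/entry/p declarations move up to function scope because the new incompressible check shares them with the existing DATA_REWRITE accounting. An incompressible source key disables compression for the move and sets op.incompressible instead, so the marker travels with the data rather than triggering another doomed compression attempt.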
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
- if (io_opts->background_compression)
+ if (io_opts->background_compression &&
+ !bch2_bkey_is_incompressible(k))
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
if (!p.ptr.cached &&
p.crc.compression_type !=
struct extent_ptr_decoded p;
extent_for_each_ptr_decode(e, p, entry) {
- if (p.crc.compression_type == BCH_COMPRESSION_TYPE_none) {
+ if (!crc_is_compressed(p.crc)) {
nr_uncompressed_extents++;
uncompressed_sectors += e.k->size;
} else {
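Finally, rebalance and the accounting code: background compression no longer queues incompressible extents, and the extent statistics count them in the uncompressed bucket, matching how they are actually stored.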