unsigned sectors;
        int ret;
 
+       bch2_check_set_feature(op->c, BCH_FEATURE_INLINE_DATA);
+
        ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
                                   ARRAY_SIZE(op->inline_keys),
                                   BKEY_U64s + DIV_ROUND_UP(data_len, 8));
 
                ret = bch2_check_set_has_compressed_data(c, v);
                break;
        case Opt_erasure_code:
-               if (v &&
-                   !(c->sb.features & (1ULL << BCH_FEATURE_EC))) {
-                       mutex_lock(&c->sb_lock);
-                       c->disk_sb.sb->features[0] |=
-                               cpu_to_le64(1ULL << BCH_FEATURE_EC);
-
-                       bch2_write_super(c);
-                       mutex_unlock(&c->sb_lock);
-               }
+               if (v)
+                       bch2_check_set_feature(c, BCH_FEATURE_EC);
                break;
        }
 
 
                write_sb = true;
        }
 
-       if (!(c->sb.features & (1ULL << BCH_FEATURE_INLINE_DATA))) {
-               c->disk_sb.sb->features[0] |=
-                       cpu_to_le64(1ULL << BCH_FEATURE_INLINE_DATA);
-               write_sb = true;
-       }
-
        if (!test_bit(BCH_FS_ERROR, &c->flags)) {
                c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_FEAT_ALLOC_INFO;
                write_sb = true;
 
        if (!percpu_ref_tryget(&c->writes))
                return -EROFS;
 
-       if (!(c->sb.features & (1ULL << BCH_FEATURE_REFLINK))) {
-               mutex_lock(&c->sb_lock);
-               if (!(c->sb.features & (1ULL << BCH_FEATURE_REFLINK))) {
-                       c->disk_sb.sb->features[0] |=
-                               cpu_to_le64(1ULL << BCH_FEATURE_REFLINK);
-
-                       bch2_write_super(c);
-               }
-               mutex_unlock(&c->sb_lock);
-       }
+       bch2_check_set_feature(c, BCH_FEATURE_REFLINK);
 
        dst_end.offset += remap_sectors;
        src_end.offset += remap_sectors;
 
        return ret;
 }
 
+/* Persistently set a superblock feature bit, if not already set: */
+void __bch2_check_set_feature(struct bch_fs *c, unsigned feat)
+{
+       mutex_lock(&c->sb_lock);
+       if (!(c->sb.features & (1ULL << feat))) {
+               c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << feat);
+
+               bch2_write_super(c);
+       }
+       mutex_unlock(&c->sb_lock);
+}
+
 /* BCH_SB_FIELD_journal: */
 
 static int u64_cmp(const void *_l, const void *_r)
 
                                   struct bch_sb_field *);
 };
 
-static inline bool bch2_sb_test_feature(struct bch_sb *sb,
-                                       enum bch_sb_features f)
-{
-       unsigned w = f / 64;
-       unsigned b = f % 64;
-
-       return le64_to_cpu(sb->features[w]) & (1ULL << b);
-}
-
-static inline void bch2_sb_set_feature(struct bch_sb *sb,
-                                      enum bch_sb_features f)
-{
-       if (!bch2_sb_test_feature(sb, f)) {
-               unsigned w = f / 64;
-               unsigned b = f % 64;
-
-               le64_add_cpu(&sb->features[w], 1ULL << b);
-       }
-}
-
 static inline __le64 bch2_sb_magic(struct bch_fs *c)
 {
        __le64 ret;
 
 int bch2_read_super(const char *, struct bch_opts *, struct bch_sb_handle *);
 int bch2_write_super(struct bch_fs *);
+void __bch2_check_set_feature(struct bch_fs *, unsigned);
+
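+/* Unlocked fast path; takes sb_lock only if the bit needs setting: */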
+static inline void bch2_check_set_feature(struct bch_fs *c, unsigned feat)
+{
+       if (!(c->sb.features & (1ULL << feat)))
+               __bch2_check_set_feature(c, feat);
+}
 
 /* BCH_SB_FIELD_journal: */