struct alloc_heap_entry l,
struct alloc_heap_entry r)
{
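+ /* nr is deliberately compared in reverse: entries with larger nr sort first */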
- return (l.key > r.key) - (l.key < r.key) ?:
- (l.nr < r.nr) - (l.nr > r.nr) ?:
- (l.bucket > r.bucket) - (l.bucket < r.bucket);
+ return cmp_int(l.key, r.key) ?:
+ cmp_int(r.nr, l.nr) ?:
+ cmp_int(l.bucket, r.bucket);
}
static inline int bucket_idx_cmp(const void *_l, const void *_r)
{
const struct alloc_heap_entry *l = _l, *r = _r;
- return (l->bucket > r->bucket) - (l->bucket < r->bucket);
+ return cmp_int(l->bucket, r->bucket);
}
static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
r_v = *r;
}
- return (l_v > r_v) - (l_v < r_v);
+ return cmp_int(l_v, r_v);
}
#endif
static __always_inline int bversion_cmp(struct bversion l, struct bversion r)
{
- return (l.hi > r.hi) - (l.hi < r.hi) ?:
- (l.lo > r.lo) - (l.lo < r.lo);
+ return cmp_int(l.hi, r.hi) ?:
+ cmp_int(l.lo, r.lo);
}
#define ZERO_VERSION ((struct bversion) { .hi = 0, .lo = 0 })
{
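+ /* as a last resort, break ties by comparing the pointers themselves */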
return bkey_cmp_packed(b, l, r)
?: (int) bkey_deleted(r) - (int) bkey_deleted(l)
- ?: (l > r) - (l < r);
+ ?: cmp_int(l, r);
}
static inline int btree_node_iter_cmp(struct btree *b,
static inline int btree_trans_cmp(struct btree_insert_entry l,
struct btree_insert_entry r)
{
- return (l.deferred > r.deferred) - (l.deferred < r.deferred) ?:
+ return cmp_int(l.deferred, r.deferred) ?:
btree_iter_cmp(l.iter, r.iter);
}
unsigned l = *((const unsigned *) _l);
unsigned r = *((const unsigned *) _r);
- return (l > r) - (l < r);
+ return cmp_int(l, r);
}
/* pick most common bucket size: */
static inline int ptrcmp(void *l, void *r)
{
- return (l > r) - (l < r);
+ return cmp_int(l, r);
}
#define __bch2_lock_inodes(_lock, ...) \
const struct journal_seq_blacklist_table_entry *l = _l;
const struct journal_seq_blacklist_table_entry *r = _r;
- return (l->start > r->start) - (l->start < r->start);
+ return cmp_int(l->start, r->start);
}
bool bch2_journal_seq_is_blacklisted(struct bch_fs *c, u64 seq,
struct copygc_heap_entry l,
struct copygc_heap_entry r)
{
- return (l.sectors > r.sectors) - (l.sectors < r.sectors);
+ return cmp_int(l.sectors, r.sectors);
}
static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
const struct copygc_heap_entry *l = _l;
const struct copygc_heap_entry *r = _r;
- return (l->offset > r->offset) - (l->offset < r->offset);
+ return cmp_int(l->offset, r->offset);
}
static bool __copygc_pred(struct bch_dev *ca,
static inline int u8_cmp(u8 l, u8 r)
{
- return (l > r) - (l < r);
+ return cmp_int(l, r);
}
static void verify_replicas_entry_sorted(struct bch_replicas_entry *e)
static int unsigned_cmp(const void *_l, const void *_r)
{
- unsigned l = *((unsigned *) _l);
- unsigned r = *((unsigned *) _r);
+ const unsigned *l = _l;
+ const unsigned *r = _r;
- return (l > r) - (l < r);
+ return cmp_int(*l, *r);
}
static ssize_t show_quantiles(struct bch_fs *c, struct bch_dev *ca,
u64 *bch2_acc_percpu_u64s(u64 __percpu *, unsigned);
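+/* three-way comparison: returns -1, 0 or 1 for l < r, l == r, l > r */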
+#define cmp_int(l, r) (((l) > (r)) - ((l) < (r)))
+
#endif /* _BCACHEFS_UTIL_H */