This x-macroizes the alloc_reserve enum, making an array of strings
(bch2_alloc_reserves[]) available like our other enums, and uses it so
the bucket_alloc tracepoints print the reserve by name instead of by
number.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
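
For readers unfamiliar with the idiom, here is a minimal, self-contained
sketch of the x-macro pattern this patch introduces (the names mirror
the diff below; the sketch is illustrative, not part of the patch):

    #include <stdio.h>

    /* One list of names drives both the enum and the string table. */
    #define BCH_ALLOC_RESERVES()    \
            x(btree_movinggc)       \
            x(btree)                \
            x(movinggc)             \
            x(none)

    enum alloc_reserve {
    #define x(name) RESERVE_##name,
            BCH_ALLOC_RESERVES()
    #undef x
            RESERVE_NR
    };

    static const char * const bch2_alloc_reserves[] = {
    #define x(t) #t,
            BCH_ALLOC_RESERVES()
    #undef x
            NULL
    };

    int main(void)
    {
            /* Enum values index the string table: prints "movinggc". */
            printf("%s\n", bch2_alloc_reserves[RESERVE_movinggc]);
            return 0;
    }

Because both expansions consume the same list, the enum and the string
array cannot fall out of sync when a reserve is added or removed.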
                 * Don't strand buckets on the copygc freelist until
                 * after recovery is finished:
                 */
-               if (i == RESERVE_MOVINGGC &&
+               if (i == RESERVE_movinggc &&
                    !test_bit(BCH_FS_STARTED, &c->flags))
                        continue;
 
                 * allocations for foreground writes must wait -
                 * not -ENOSPC calculations.
                 */
-               for (j = 0; j < RESERVE_NONE; j++)
+               for (j = 0; j < RESERVE_none; j++)
                        dev_reserve += ca->free[j].size;
 
                dev_reserve += 1;       /* btree write point */
 
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 
+const char * const bch2_alloc_reserves[] = {
+#define x(t) #t,
+       BCH_ALLOC_RESERVES()
+#undef x
+       NULL
+};
+
 /*
  * Open buckets represent a bucket that's currently being allocated from.  They
  * serve two purposes:
 static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
 {
        switch (reserve) {
-       case RESERVE_BTREE:
-       case RESERVE_BTREE_MOVINGGC:
+       case RESERVE_btree:
+       case RESERVE_btree_movinggc:
                return 0;
-       case RESERVE_MOVINGGC:
+       case RESERVE_movinggc:
                return OPEN_BUCKETS_COUNT / 4;
        default:
                return OPEN_BUCKETS_COUNT / 2;
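
To make the thresholds concrete: assuming, hypothetically, that the
caller refuses an allocation once the number of free open buckets drops
to the returned value, and that OPEN_BUCKETS_COUNT were 1024, btree
allocations could draw down the open buckets entirely, movinggc
allocations would stop once only 256 (a quarter) remained, and default
foreground allocations once 512 (half) remained. The tiered thresholds
keep foreground writes from starving the higher-priority users.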
                        c->blocked_allocate_open_bucket = local_clock();
 
                spin_unlock(&c->freelist_lock);
-               trace_open_bucket_alloc_fail(ca, reserve);
+               trace_open_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve]);
                return ERR_PTR(-OPEN_BUCKETS_EMPTY);
        }
 
-       if (likely(fifo_pop(&ca->free[RESERVE_NONE], b)))
+       if (likely(fifo_pop(&ca->free[RESERVE_none], b)))
                goto out;
 
        switch (reserve) {
-       case RESERVE_BTREE_MOVINGGC:
-       case RESERVE_MOVINGGC:
-               if (fifo_pop(&ca->free[RESERVE_MOVINGGC], b))
+       case RESERVE_btree_movinggc:
+       case RESERVE_movinggc:
+               if (fifo_pop(&ca->free[RESERVE_movinggc], b))
                        goto out;
                break;
        default:
 
        spin_unlock(&c->freelist_lock);
 
-       trace_bucket_alloc_fail(ca, reserve);
+       trace_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve]);
        return ERR_PTR(-FREELIST_EMPTY);
 out:
        verify_not_on_freelist(c, ca, b);
 
        bch2_wake_allocator(ca);
 
-       trace_bucket_alloc(ca, reserve);
+       trace_bucket_alloc(ca, bch2_alloc_reserves[reserve]);
        return ob;
 }
 
 
 struct bch_fs;
 struct bch_devs_list;
 
+extern const char * const bch2_alloc_reserves[];
+
 struct dev_alloc_list {
        unsigned        nr;
        u8              devs[BCH_SB_MEMBERS_MAX];
 
 #undef x
 };
 
+#define BCH_ALLOC_RESERVES()           \
+       x(btree_movinggc)               \
+       x(btree)                        \
+       x(movinggc)                     \
+       x(none)
+
 enum alloc_reserve {
-       RESERVE_BTREE_MOVINGGC  = -2,
-       RESERVE_BTREE           = -1,
-       RESERVE_MOVINGGC        = 0,
-       RESERVE_NONE            = 1,
-       RESERVE_NR              = 2,
+#define x(name)        RESERVE_##name,
+       BCH_ALLOC_RESERVES()
+#undef x
+       RESERVE_NR
 };
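
For reference, after preprocessing the new enum expands to the
equivalent of:

    enum alloc_reserve {
            RESERVE_btree_movinggc, /* = 0 */
            RESERVE_btree,          /* = 1 */
            RESERVE_movinggc,       /* = 2 */
            RESERVE_none,           /* = 3 */
            RESERVE_NR              /* = 4 */
    };

Unlike the old hand-numbered version, the values are now contiguous
from zero, which is what lets them index bch2_alloc_reserves[] directly.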
 
 typedef FIFO(long)     alloc_fifo;
 
 
        if (flags & BTREE_INSERT_USE_RESERVE) {
                nr_reserve      = 0;
-               alloc_reserve   = RESERVE_BTREE_MOVINGGC;
+               alloc_reserve   = RESERVE_btree_movinggc;
        } else {
                nr_reserve      = BTREE_NODE_RESERVE;
-               alloc_reserve   = RESERVE_BTREE;
+               alloc_reserve   = RESERVE_btree;
        }
 
        mutex_lock(&c->btree_reserve_cache_lock);
 
             !(buckets_nouse    = kvpmalloc(BITS_TO_LONGS(nbuckets) *
                                            sizeof(unsigned long),
                                            GFP_KERNEL|__GFP_ZERO))) ||
-           !init_fifo(&free[RESERVE_MOVINGGC],
+           !init_fifo(&free[RESERVE_movinggc],
                       copygc_reserve, GFP_KERNEL) ||
-           !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
+           !init_fifo(&free[RESERVE_none], reserve_none, GFP_KERNEL) ||
            !init_fifo(&free_inc,       free_inc_nr, GFP_KERNEL) ||
            !init_heap(&alloc_heap,     ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
                goto err;
 
                                            &nr_have_parity,
                                            &have_cache,
                                            h->copygc
-                                           ? RESERVE_MOVINGGC
-                                           : RESERVE_NONE,
+                                           ? RESERVE_movinggc
+                                           : RESERVE_none,
                                            0,
                                            cl);
 
                                            &nr_have_data,
                                            &have_cache,
                                            h->copygc
-                                           ? RESERVE_MOVINGGC
-                                           : RESERVE_NONE,
+                                           ? RESERVE_movinggc
+                                           : RESERVE_none,
                                            0,
                                            cl);
 
 
 
 static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
 {
-       return op->alloc_reserve == RESERVE_MOVINGGC
+       return op->alloc_reserve == RESERVE_movinggc
                ? op->c->copygc_wq
                : op->c->btree_update_wq;
 }
        op->compression_type    = bch2_compression_opt_to_type[opts.compression];
        op->nr_replicas         = 0;
        op->nr_replicas_required = c->opts.data_replicas_required;
-       op->alloc_reserve       = RESERVE_NONE;
+       op->alloc_reserve       = RESERVE_none;
        op->incompressible      = 0;
        op->open_buckets.nr     = 0;
        op->devs_have.nr        = 0;
 
                        }
                } else {
                        rcu_read_lock();
-                       ob = bch2_bucket_alloc(c, ca, RESERVE_NONE,
+                       ob = bch2_bucket_alloc(c, ca, RESERVE_none,
                                               false, cl);
                        rcu_read_unlock();
                        if (IS_ERR(ob)) {
 
                }
 
        if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE) {
-               m->op.alloc_reserve = RESERVE_MOVINGGC;
+               m->op.alloc_reserve = RESERVE_movinggc;
                m->op.flags |= BCH_WRITE_ALLOC_NOWAIT;
        } else {
                /* XXX: this should probably be passed in */
 
 #include <linux/sort.h>
 #include <linux/wait.h>
 
-/*
- * We can't use the entire copygc reserve in one iteration of copygc: we may
- * need the buckets we're freeing up to go back into the copygc reserve to make
- * forward progress, but if the copygc reserve is full they'll be available for
- * any allocation - and it's possible that in a given iteration, we free up most
- * of the buckets we're going to free before we allocate most of the buckets
- * we're going to allocate.
- *
- * If we only use half of the reserve per iteration, then in steady state we'll
- * always have room in the reserve for the buckets we're going to need in the
- * next iteration:
- */
-#define COPYGC_BUCKETS_PER_ITER(ca)                                    \
-       ((ca)->free[RESERVE_MOVINGGC].size / 2)
-
 static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
 {
        const struct copygc_heap_entry *l = _l;
        bool ret;
 
        spin_lock(&ca->fs->freelist_lock);
-       ret = fifo_full(&ca->free[RESERVE_MOVINGGC]) ||
+       ret = fifo_full(&ca->free[RESERVE_movinggc]) ||
                ca->allocator_state != ALLOCATOR_running;
        spin_unlock(&ca->fs->freelist_lock);
 
                closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));
 
                spin_lock(&ca->fs->freelist_lock);
-               sectors_reserved += fifo_used(&ca->free[RESERVE_MOVINGGC]) * ca->mi.bucket_size;
+               sectors_reserved += fifo_used(&ca->free[RESERVE_movinggc]) * ca->mi.bucket_size;
                spin_unlock(&ca->fs->freelist_lock);
        }
 
        }
 
        /*
-        * Our btree node allocations also come out of RESERVE_MOVINGGC:
+        * Our btree node allocations also come out of RESERVE_movinggc:
         */
        sectors_reserved = (sectors_reserved * 3) / 4;
        if (!sectors_reserved) {
 
               stats.buckets_ec,
               __dev_buckets_available(ca, stats),
               fifo_used(&ca->free_inc),                ca->free_inc.size,
-              fifo_used(&ca->free[RESERVE_MOVINGGC]),  ca->free[RESERVE_MOVINGGC].size,
-              fifo_used(&ca->free[RESERVE_NONE]),      ca->free[RESERVE_NONE].size,
+              fifo_used(&ca->free[RESERVE_movinggc]),  ca->free[RESERVE_movinggc].size,
+              fifo_used(&ca->free[RESERVE_none]),      ca->free[RESERVE_none].size,
               c->freelist_wait.list.first              ? "waiting" : "empty",
               OPEN_BUCKETS_COUNT - c->open_buckets_nr_free,
               ca->nr_open_buckets,
 
 );
 
 DECLARE_EVENT_CLASS(bucket_alloc,
-       TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-       TP_ARGS(ca, reserve),
+       TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
+       TP_ARGS(ca, alloc_reserve),
 
        TP_STRUCT__entry(
                __field(dev_t,                  dev     )
-               __field(enum alloc_reserve,     reserve )
+               __array(char,   reserve,        16      )
        ),
 
        TP_fast_assign(
                __entry->dev            = ca->dev;
-               __entry->reserve        = reserve;
+               strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
        ),
 
-       TP_printk("%d,%d reserve %d",
+       TP_printk("%d,%d reserve %s",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->reserve)
 );
 
 DEFINE_EVENT(bucket_alloc, bucket_alloc,
-       TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-       TP_ARGS(ca, reserve)
+       TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
+       TP_ARGS(ca, alloc_reserve)
 );
 
 DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
-       TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-       TP_ARGS(ca, reserve)
+       TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
+       TP_ARGS(ca, alloc_reserve)
 );
 
 DEFINE_EVENT(bucket_alloc, open_bucket_alloc_fail,
-       TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-       TP_ARGS(ca, reserve)
+       TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
+       TP_ARGS(ca, alloc_reserve)
 );
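
With the string table wired in, a bucket_alloc trace line now names the
reserve rather than printing a raw enum value; for example (device
numbers hypothetical):

    bucket_alloc: 254,2 reserve movinggc

where the old "%d,%d reserve %d" format would have ended in a bare
integer.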
 
 /* Moving IO */