.key_len = sizeof(xfs_alloc_key_t),
.lru_refs = XFS_ALLOC_BTREE_REF,
+ .statoff = XFS_STATS_CALC_INDEX(xs_abtb_2),
.dup_cursor = xfs_allocbt_dup_cursor,
.set_root = xfs_allocbt_set_root,
.key_len = sizeof(xfs_alloc_key_t),
.lru_refs = XFS_ALLOC_BTREE_REF,
+ .statoff = XFS_STATS_CALC_INDEX(xs_abtc_2),
.dup_cursor = xfs_allocbt_dup_cursor,
.set_root = xfs_allocbt_set_root,
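The XFS_STATS_CALC_INDEX() value stored in each ops structure is a plain compile-time index into the flat stats array. For reference, the helper is defined in fs/xfs/xfs_stats.h along these lines (quoted from memory, so treat it as a sketch rather than the authoritative text):

	#define XFS_STATS_CALC_INDEX(member)	\
		(offsetof(struct __xfsstats, member) / (int)sizeof(uint32_t))

Because the result is a constant, it can live in the const ops structure instead of being assigned to every cursor at runtime, which is what the hunks below rely on.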
struct xfs_perag *pag,
xfs_btnum_t btnum)
{
+ const struct xfs_btree_ops *ops = &xfs_bnobt_ops;
struct xfs_btree_cur *cur;
ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
- if (btnum == XFS_BTNUM_CNT) {
- cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_cntbt_ops,
- mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
- } else {
- cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_bnobt_ops,
- mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
- }
+ if (btnum == XFS_BTNUM_CNT)
+ ops = &xfs_cntbt_ops;
+ cur = xfs_btree_alloc_cursor(mp, tp, btnum, ops, mp->m_alloc_maxlevels,
+ xfs_allocbt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
-
return cur;
}
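Passing the ops pointer into xfs_btree_alloc_cursor() is sufficient here because that helper stores it on the new cursor, which is how the reworked stats macros further down reach ->statoff. A minimal sketch of the helper, reconstructed from the call sites visible in this patch (the allocation flags and the exact set of field assignments besides bc_ops are assumptions):

	static inline struct xfs_btree_cur *
	xfs_btree_alloc_cursor(struct xfs_mount *mp, struct xfs_trans *tp,
			xfs_btnum_t btnum, const struct xfs_btree_ops *ops,
			uint8_t maxlevels, struct kmem_cache *cache)
	{
		struct xfs_btree_cur	*cur;

		/* zeroed allocation from the per-btree-type cursor cache */
		cur = kmem_cache_zalloc(cache, GFP_NOFS | __GFP_NOFAIL);
		cur->bc_ops = ops;	/* ->statoff now travels with the ops */
		cur->bc_mp = mp;
		cur->bc_tp = tp;
		cur->bc_btnum = btnum;
		cur->bc_maxlevels = maxlevels;
		cur->bc_cache = cache;
		return cur;
	}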
.key_len = sizeof(xfs_bmbt_key_t),
.lru_refs = XFS_BMAP_BTREE_REF,
+ .statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2),
.dup_cursor = xfs_bmbt_dup_cursor,
.update_cursor = xfs_bmbt_update_cursor,
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP, &xfs_bmbt_ops,
mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache);
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);
cur->bc_ino.ip = ip;
cur->bc_ino.allocated = 0;
* Generic stats interface
*/
#define XFS_BTREE_STATS_INC(cur, stat) \
- XFS_STATS_INC_OFF((cur)->bc_mp, (cur)->bc_statoff + __XBTS_ ## stat)
+ XFS_STATS_INC_OFF((cur)->bc_mp, \
+ (cur)->bc_ops->statoff + __XBTS_ ## stat)
#define XFS_BTREE_STATS_ADD(cur, stat, val) \
- XFS_STATS_ADD_OFF((cur)->bc_mp, (cur)->bc_statoff + __XBTS_ ## stat, val)
+ XFS_STATS_ADD_OFF((cur)->bc_mp, \
+ (cur)->bc_ops->statoff + __XBTS_ ## stat, val)
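With the bnobt ops from the first hunk installed on a cursor, for example, XFS_BTREE_STATS_INC(cur, lookup) now expands to

	XFS_STATS_INC_OFF((cur)->bc_mp,
			XFS_STATS_CALC_INDEX(xs_abtb_2) + __XBTS_lookup);

i.e. exactly the counter the old bc_statoff-based expansion incremented; only where the base offset is loaded from changes.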
enum xbtree_key_contig {
XBTREE_KEY_GAP = 0,
/* LRU refcount to set on each btree buffer created */
unsigned int lru_refs;
+ /* offset of btree stats array */
+ unsigned int statoff;
+
/* cursor operations */
struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *);
void (*update_cursor)(struct xfs_btree_cur *src,
union xfs_btree_irec bc_rec; /* current insert/search record value */
uint8_t bc_nlevels; /* number of levels in the tree */
uint8_t bc_maxlevels; /* maximum levels for this btree type */
- int bc_statoff; /* offset of btree stats array */
/*
* Short btree pointers need an agno to be able to turn the pointers
.key_len = sizeof(xfs_inobt_key_t),
.lru_refs = XFS_INO_BTREE_REF,
+ .statoff = XFS_STATS_CALC_INDEX(xs_ibt_2),
.dup_cursor = xfs_inobt_dup_cursor,
.set_root = xfs_inobt_set_root,
.key_len = sizeof(xfs_inobt_key_t),
.lru_refs = XFS_INO_BTREE_REF,
+ .statoff = XFS_STATS_CALC_INDEX(xs_fibt_2),
.dup_cursor = xfs_inobt_dup_cursor,
.set_root = xfs_finobt_set_root,
xfs_btnum_t btnum) /* ialloc or free ino btree */
{
struct xfs_mount *mp = pag->pag_mount;
+ const struct xfs_btree_ops *ops = &xfs_inobt_ops;
struct xfs_btree_cur *cur;
- if (btnum == XFS_BTNUM_INO) {
- cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_inobt_ops,
- M_IGEO(mp)->inobt_maxlevels,
- xfs_inobt_cur_cache);
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
- } else {
- cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_finobt_ops,
- M_IGEO(mp)->inobt_maxlevels,
- xfs_inobt_cur_cache);
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
- }
+ ASSERT(btnum == XFS_BTNUM_INO || btnum == XFS_BTNUM_FINO);
+
+ if (btnum == XFS_BTNUM_FINO)
+ ops = &xfs_finobt_ops;
+ cur = xfs_btree_alloc_cursor(mp, tp, btnum, ops,
+ M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
return cur;
}
.key_len = sizeof(struct xfs_refcount_key),
.lru_refs = XFS_REFC_BTREE_REF,
+ .statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2),
.dup_cursor = xfs_refcountbt_dup_cursor,
.set_root = xfs_refcountbt_set_root,
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC,
&xfs_refcountbt_ops, mp->m_refc_maxlevels,
xfs_refcountbt_cur_cache);
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
-
cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_ag.refc.nr_ops = 0;
cur->bc_ag.refc.shape_changes = 0;
.key_len = 2 * sizeof(struct xfs_rmap_key),
.lru_refs = XFS_RMAP_BTREE_REF,
+ .statoff = XFS_STATS_CALC_INDEX(xs_rmap_2),
.dup_cursor = xfs_rmapbt_dup_cursor,
.set_root = xfs_rmapbt_set_root,
/* Overlapping btree; 2 keys per pointer. */
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP, &xfs_rmapbt_ops,
mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
-
cur->bc_ag.pag = xfs_perag_hold(pag);
return cur;
}