 }
 }
 
-int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
-                           struct btrfs_delayed_ref_root *delayed_refs,
-                           u64 seq)
+int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
 {
        struct seq_list *elem;
        int ret = 0;
 
        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                if (seq >= elem->seq) {
                        btrfs_debug(fs_info,
-                               "holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
+                               "holding back delayed_ref %#x.%x, lowest is %#x.%x",
                                (u32)(seq >> 32), (u32)seq,
-                               (u32)(elem->seq >> 32), (u32)elem->seq,
-                               delayed_refs);
+                               (u32)(elem->seq >> 32), (u32)elem->seq);
                        ret = 1;
                }
        }
 
        spin_unlock(&fs_info->tree_mod_seq_lock);
        return ret;
 }
 struct btrfs_delayed_ref_head *
 btrfs_select_ref_head(struct btrfs_trans_handle *trans);
 
-int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
-                           struct btrfs_delayed_ref_root *delayed_refs,
-                           u64 seq);
+int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);
 
 /*
  * helper functions to cast a node into its container
 
                ref = select_delayed_ref(locked_ref);
 
                if (ref && ref->seq &&
-                   btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
+                   btrfs_check_delayed_seq(fs_info, ref->seq)) {
                        spin_unlock(&locked_ref->lock);
                        unselect_delayed_ref_head(delayed_refs, locked_ref);
                        locked_ref = NULL;