d->events_per_period = 0;
 
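+       // No sequence descriptors are allocated until the domain starts.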
+       d->seq_descs = NULL;
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(amdtp_domain_init);
        queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
                                  amdtp_rate_table[d->irq_target->sfc]);
 
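+       // One sequence descriptor per isochronous cycle buffered in the
+       // queue; queue_size computed above counts exactly those cycles.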
+       d->seq_descs = kcalloc(queue_size, sizeof(*d->seq_descs), GFP_KERNEL);
+       if (!d->seq_descs)
+               return -ENOMEM;
+       d->seq_size = queue_size;
+       d->seq_tail = 0;
+
        if (ir_delay_cycle > 0) {
                struct fw_card *fw_card = fw_parent_device(s->unit)->card;
 
                err = get_current_cycle_time(fw_card, &cycle);
                if (err < 0)
-                       return err;
+                       goto error;
 
                // No need to care about overflow in the cycle field; it is
                // wide enough.
 error:
        list_for_each_entry(s, &d->streams, list)
                amdtp_stream_stop(s);
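+       // Undo the allocation above so a failed start does not leak, and
+       // reset the pointer so a later amdtp_domain_stop() cannot double-free.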
+       kfree(d->seq_descs);
+       d->seq_descs = NULL;
        return err;
 }
 EXPORT_SYMBOL_GPL(amdtp_domain_start);
 
        d->events_per_period = 0;
        d->irq_target = NULL;
+
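+       // Release the sequence descriptors allocated in amdtp_domain_start().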
+       kfree(d->seq_descs);
+       d->seq_descs = NULL;
 }
 EXPORT_SYMBOL_GPL(amdtp_domain_stop);
 
                                  msecs_to_jiffies(timeout)) > 0;
 }
 
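+// Descriptor for one packet in a sequence: the offset used to compute the
+// packet's SYT timestamp and the number of data blocks it transfers.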
+struct seq_desc {
+       unsigned int syt_offset;
+       unsigned int data_blocks;
+};
+
 struct amdtp_domain {
        struct list_head streams;
 
        unsigned int events_per_buffer;
 
        struct amdtp_stream *irq_target;
+
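+       // Ring of packet parameters for the domain; seq_size entries are
+       // allocated in amdtp_domain_start(), seq_tail is the next index to
+       // fill.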
+       struct seq_desc *seq_descs;
+       unsigned int seq_size;
+       unsigned int seq_tail;
 };
 
 int amdtp_domain_init(struct amdtp_domain *d);
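
For context, a minimal sketch of how a unit driver might drive the lifecycle
touched by this patch. It assumes the rest of the amdtp_domain API declared
in this header (amdtp_domain_add_stream(), amdtp_domain_destroy()) and that
amdtp_domain_start() takes the ir_delay_cycle argument seen above;
example_session() and its parameters are hypothetical and error handling is
abbreviated.

static int example_session(struct amdtp_domain *d, struct amdtp_stream *s,
                           int channel, int speed)
{
        int err;

        err = amdtp_domain_init(d);     // leaves d->seq_descs == NULL
        if (err < 0)
                return err;

        err = amdtp_domain_add_stream(d, s, channel, speed);
        if (err >= 0) {
                // Allocates d->seq_descs; on failure the array is freed again.
                err = amdtp_domain_start(d, 0);
                if (err >= 0) {
                        // ... the session runs; descriptors are filled at
                        // d->seq_tail ...
                }
        }

        amdtp_domain_stop(d);           // frees d->seq_descs, resets to NULL
        amdtp_domain_destroy(d);
        return err;
}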