EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
static void pool_blocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
- const unsigned int seq_size, unsigned int seq_tail,
- unsigned int count)
+ unsigned int size, unsigned int pos, unsigned int count)
{
const unsigned int syt_interval = s->syt_interval;
int i;
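// Blocking transmission: a packet carries either syt_interval data blocks
// (when its SYT field is valid) or no data blocks at all.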
for (i = 0; i < count; ++i) {
- struct seq_desc *desc = descs + seq_tail;
+ struct seq_desc *desc = descs + pos;
if (desc->syt_offset != CIP_SYT_NO_INFO)
desc->data_blocks = syt_interval;
else
desc->data_blocks = 0;
- seq_tail = (seq_tail + 1) % seq_size;
+ pos = (pos + 1) % size;
}
}
static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
- const unsigned int seq_size, unsigned int seq_tail,
+ unsigned int size, unsigned int pos,
unsigned int count)
{
const enum cip_sfc sfc = s->sfc;
unsigned int state = s->ctx_data.rx.data_block_state;
int i;
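// Non-blocking transmission: every packet carries data blocks; the count per
// packet is chosen so that the long-run average matches the sampling rate.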
for (i = 0; i < count; ++i) {
- struct seq_desc *desc = descs + seq_tail;
+ struct seq_desc *desc = descs + pos;
if (!cip_sfc_is_base_44100(sfc)) {
// Sample_rate / 8000 is an integer, and precomputed.
desc->data_blocks = state;
} else {
unsigned int phase = state;
// This calculates the number of data blocks per packet so that
// 1) the overall rate is correct and exactly synchronized to
//    the bus clock, and
// 2) packets with a rounded-up number of blocks occur as early
//    as possible in the sequence (to prevent underruns of the
//    device's buffer).
if (sfc == CIP_SFC_44100)
// 6 6 5 6 5 6 5 ...
desc->data_blocks = 5 + ((phase & 1) ^ (phase == 0 || phase >= 40));
else
// 12 11 11 11 11 ... or 23 22 22 22 22 ...
desc->data_blocks = 11 * (sfc >> 1) + (phase == 0);
if (++phase >= (80 >> (sfc >> 1)))
phase = 0;
state = phase;
}
- seq_tail = (seq_tail + 1) % seq_size;
+ pos = (pos + 1) % size;
}
s->ctx_data.rx.data_block_state = state;
}
static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *descs,
- const unsigned int seq_size, unsigned int seq_tail,
- unsigned int count)
+ unsigned int size, unsigned int pos, unsigned int count)
{
const enum cip_sfc sfc = s->sfc;
unsigned int last = s->ctx_data.rx.last_syt_offset;
unsigned int state = s->ctx_data.rx.syt_offset_state;
int i;
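// Pre-compute the ideal SYT offset (the presentation time within its
// isochronous cycle) for each descriptor in the range.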
for (i = 0; i < count; ++i) {
- struct seq_desc *desc = descs + seq_tail;
+ struct seq_desc *desc = descs + pos;
desc->syt_offset = calculate_syt_offset(&last, &state, sfc);
- seq_tail = (seq_tail + 1) % seq_size;
+ pos = (pos + 1) % size;
}
s->ctx_data.rx.last_syt_offset = last;
s->ctx_data.rx.syt_offset_state = state;
}
-static void pool_ideal_seq_descs(struct amdtp_stream *s, unsigned int count)
+static void pool_ideal_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
+ unsigned int pos, unsigned int count)
{
- struct seq_desc *descs = s->ctx_data.rx.seq.descs;
- unsigned int seq_tail = s->ctx_data.rx.seq.tail;
- const unsigned int seq_size = s->ctx_data.rx.seq.size;
-
- pool_ideal_syt_offsets(s, descs, seq_size, seq_tail, count);
+ pool_ideal_syt_offsets(s, descs, size, pos, count);
if (s->flags & CIP_BLOCKING)
- pool_blocking_data_blocks(s, descs, seq_size, seq_tail, count);
+ pool_blocking_data_blocks(s, descs, size, pos, count);
else
- pool_ideal_nonblocking_data_blocks(s, descs, seq_size, seq_tail, count);
-
- s->ctx_data.rx.seq.tail = (seq_tail + count) % seq_size;
+ pool_ideal_nonblocking_data_blocks(s, descs, size, pos, count);
}
-static void pool_replayed_seq(struct amdtp_stream *s, unsigned int count)
+static void pool_replayed_seq(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
+ unsigned int pos, unsigned int count)
{
struct amdtp_stream *target = s->ctx_data.rx.replay_target;
const struct seq_desc *cache = target->ctx_data.tx.cache.descs;
const unsigned int cache_size = target->ctx_data.tx.cache.size;
unsigned int cache_head = s->ctx_data.rx.cache_head;
- struct seq_desc *descs = s->ctx_data.rx.seq.descs;
- const unsigned int seq_size = s->ctx_data.rx.seq.size;
- unsigned int seq_tail = s->ctx_data.rx.seq.tail;
int i;
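// Replay mode: copy descriptors cached from the associated tx stream so that
// the rx stream reproduces the device's own sequence.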
for (i = 0; i < count; ++i) {
- descs[seq_tail] = cache[cache_head];
- seq_tail = (seq_tail + 1) % seq_size;
+ descs[pos] = cache[cache_head];
cache_head = (cache_head + 1) % cache_size;
+ pos = (pos + 1) % size;
}
- s->ctx_data.rx.seq.tail = seq_tail;
s->ctx_data.rx.cache_head = cache_head;
}
static void pool_seq_descs(struct amdtp_stream *s, unsigned int count)
{
struct amdtp_domain *d = s->domain;
+ struct seq_desc *descs = s->ctx_data.rx.seq.descs;
+ const unsigned int size = s->ctx_data.rx.seq.size;
+ unsigned int pos = s->ctx_data.rx.seq.pos;
+ void (*pool_seq_descs)(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
+ unsigned int pos, unsigned int count);
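+ // Choose how to pool the sequence, then invoke the chosen helper once with
+ // the same ring-buffer arguments either way.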
if (!d->replay.enable || !s->ctx_data.rx.replay_target) {
- pool_ideal_seq_descs(s, count);
+ pool_seq_descs = pool_ideal_seq_descs;
} else {
if (!d->replay.on_the_fly) {
- pool_replayed_seq(s, count);
+ pool_seq_descs = pool_replayed_seq;
} else {
struct amdtp_stream *tx = s->ctx_data.rx.replay_target;
const unsigned int cache_size = tx->ctx_data.tx.cache.size;
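// Replay on the fly only once enough of the tx sequence is cached;
// otherwise keep generating the ideal sequence.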
unsigned int cache_head = s->ctx_data.rx.cache_head;
unsigned int cached_cycles = calculate_cached_cycle_count(tx, cache_head);
if (cached_cycles > count && cached_cycles > cache_size / 2)
- pool_replayed_seq(s, count);
+ pool_seq_descs = pool_replayed_seq;
else
- pool_ideal_seq_descs(s, count);
+ pool_seq_descs = pool_ideal_seq_descs;
}
}
+
+ pool_seq_descs(s, descs, size, pos, count);
+
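+ // Advance the ring-buffer position once for the whole pooled range.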
+ s->ctx_data.rx.seq.pos = (pos + count) % size;
}
static void update_pcm_pointers(struct amdtp_stream *s,
goto err_context;
}
s->ctx_data.rx.seq.size = queue_size;
- s->ctx_data.rx.seq.tail = 0;
+ s->ctx_data.rx.seq.pos = 0;
s->ctx_data.rx.seq.head = 0;
entry = &initial_state[s->sfc];