ALSA: firewire-lib: code refactoring for helper functions to pool sequence in rx...
authorTakashi Sakamoto <o-takashi@sakamocchi.jp>
Sat, 7 Jan 2023 02:32:11 +0000 (11:32 +0900)
committerTakashi Iwai <tiwai@suse.de>
Sun, 8 Jan 2023 14:00:08 +0000 (15:00 +0100)
When scheduling transmission of rx packets, current implementation pools
sequence descriptors at first for media clock. Two methods are used for
the purpose depending on four cases, while the implementations do not
necessarily have good readability.

This commit refactors them by adding function pointers and function
arguments.

Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
Link: https://lore.kernel.org/r/20230107023214.29132-2-o-takashi@sakamocchi.jp
Signed-off-by: Takashi Iwai <tiwai@suse.de>
sound/firewire/amdtp-stream.c
sound/firewire/amdtp-stream.h

index 9be2260e4ca2d26b41c9784b686ae6080a0e6954..881e30c049fc92f4986fcd964a82b95af81d92f5 100644 (file)
@@ -349,26 +349,25 @@ void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
 EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
 
 static void pool_blocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
-                                     const unsigned int seq_size, unsigned int seq_tail,
-                                     unsigned int count)
+                                     unsigned int size, unsigned int pos, unsigned int count)
 {
        const unsigned int syt_interval = s->syt_interval;
        int i;
 
        for (i = 0; i < count; ++i) {
-               struct seq_desc *desc = descs + seq_tail;
+               struct seq_desc *desc = descs + pos;
 
                if (desc->syt_offset != CIP_SYT_NO_INFO)
                        desc->data_blocks = syt_interval;
                else
                        desc->data_blocks = 0;
 
-               seq_tail = (seq_tail + 1) % seq_size;
+               pos = (pos + 1) % size;
        }
 }
 
 static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
-                                              const unsigned int seq_size, unsigned int seq_tail,
+                                              unsigned int size, unsigned int pos,
                                               unsigned int count)
 {
        const enum cip_sfc sfc = s->sfc;
@@ -376,7 +375,7 @@ static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct se
        int i;
 
        for (i = 0; i < count; ++i) {
-               struct seq_desc *desc = descs + seq_tail;
+               struct seq_desc *desc = descs + pos;
 
                if (!cip_sfc_is_base_44100(sfc)) {
                        // Sample_rate / 8000 is an integer, and precomputed.
@@ -403,7 +402,7 @@ static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct se
                        state = phase;
                }
 
-               seq_tail = (seq_tail + 1) % seq_size;
+               pos = (pos + 1) % size;
        }
 
        s->ctx_data.rx.data_block_state = state;
@@ -449,8 +448,7 @@ static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
 }
 
 static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *descs,
-                                  const unsigned int seq_size, unsigned int seq_tail,
-                                  unsigned int count)
+                                  unsigned int size, unsigned int pos, unsigned int count)
 {
        const enum cip_sfc sfc = s->sfc;
        unsigned int last = s->ctx_data.rx.last_syt_offset;
@@ -458,11 +456,11 @@ static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *desc
        int i;
 
        for (i = 0; i < count; ++i) {
-               struct seq_desc *desc = descs + seq_tail;
+               struct seq_desc *desc = descs + pos;
 
                desc->syt_offset = calculate_syt_offset(&last, &state, sfc);
 
-               seq_tail = (seq_tail + 1) % seq_size;
+               pos = (pos + 1) % size;
        }
 
        s->ctx_data.rx.last_syt_offset = last;
@@ -531,52 +529,49 @@ static void cache_seq(struct amdtp_stream *s, const struct pkt_desc *descs, unsi
        s->ctx_data.tx.cache.tail = cache_tail;
 }
 
-static void pool_ideal_seq_descs(struct amdtp_stream *s, unsigned int count)
+static void pool_ideal_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
+                                unsigned int pos, unsigned int count)
 {
-       struct seq_desc *descs = s->ctx_data.rx.seq.descs;
-       unsigned int seq_tail = s->ctx_data.rx.seq.tail;
-       const unsigned int seq_size = s->ctx_data.rx.seq.size;
-
-       pool_ideal_syt_offsets(s, descs, seq_size, seq_tail, count);
+       pool_ideal_syt_offsets(s, descs, size, pos, count);
 
        if (s->flags & CIP_BLOCKING)
-               pool_blocking_data_blocks(s, descs, seq_size, seq_tail, count);
+               pool_blocking_data_blocks(s, descs, size, pos, count);
        else
-               pool_ideal_nonblocking_data_blocks(s, descs, seq_size, seq_tail, count);
-
-       s->ctx_data.rx.seq.tail = (seq_tail + count) % seq_size;
+               pool_ideal_nonblocking_data_blocks(s, descs, size, pos, count);
 }
 
-static void pool_replayed_seq(struct amdtp_stream *s, unsigned int count)
+static void pool_replayed_seq(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
+                             unsigned int pos, unsigned int count)
 {
        struct amdtp_stream *target = s->ctx_data.rx.replay_target;
        const struct seq_desc *cache = target->ctx_data.tx.cache.descs;
        const unsigned int cache_size = target->ctx_data.tx.cache.size;
        unsigned int cache_head = s->ctx_data.rx.cache_head;
-       struct seq_desc *descs = s->ctx_data.rx.seq.descs;
-       const unsigned int seq_size = s->ctx_data.rx.seq.size;
-       unsigned int seq_tail = s->ctx_data.rx.seq.tail;
        int i;
 
        for (i = 0; i < count; ++i) {
-               descs[seq_tail] = cache[cache_head];
-               seq_tail = (seq_tail + 1) % seq_size;
+               descs[pos] = cache[cache_head];
                cache_head = (cache_head + 1) % cache_size;
+               pos = (pos + 1) % size;
        }
 
-       s->ctx_data.rx.seq.tail = seq_tail;
        s->ctx_data.rx.cache_head = cache_head;
 }
 
 static void pool_seq_descs(struct amdtp_stream *s, unsigned int count)
 {
        struct amdtp_domain *d = s->domain;
+       struct seq_desc *descs = s->ctx_data.rx.seq.descs;
+       const unsigned int size = s->ctx_data.rx.seq.size;
+       unsigned int pos = s->ctx_data.rx.seq.pos;
+       void (*pool_seq_descs)(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
+                              unsigned int pos, unsigned int count);
 
        if (!d->replay.enable || !s->ctx_data.rx.replay_target) {
-               pool_ideal_seq_descs(s, count);
+               pool_seq_descs = pool_ideal_seq_descs;
        } else {
                if (!d->replay.on_the_fly) {
-                       pool_replayed_seq(s, count);
+                       pool_seq_descs = pool_replayed_seq;
                } else {
                        struct amdtp_stream *tx = s->ctx_data.rx.replay_target;
                        const unsigned int cache_size = tx->ctx_data.tx.cache.size;
@@ -584,11 +579,15 @@ static void pool_seq_descs(struct amdtp_stream *s, unsigned int count)
                        unsigned int cached_cycles = calculate_cached_cycle_count(tx, cache_head);
 
                        if (cached_cycles > count && cached_cycles > cache_size / 2)
-                               pool_replayed_seq(s, count);
+                               pool_seq_descs = pool_replayed_seq;
                        else
-                               pool_ideal_seq_descs(s, count);
+                               pool_seq_descs = pool_ideal_seq_descs;
                }
        }
+
+       pool_seq_descs(s, descs, size, pos, count);
+
+       s->ctx_data.rx.seq.pos = (pos + count) % size;
 }
 
 static void update_pcm_pointers(struct amdtp_stream *s,
@@ -1644,7 +1643,7 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
                        goto err_context;
                }
                s->ctx_data.rx.seq.size = queue_size;
-               s->ctx_data.rx.seq.tail = 0;
+               s->ctx_data.rx.seq.pos = 0;
                s->ctx_data.rx.seq.head = 0;
 
                entry = &initial_state[s->sfc];
index 1f957c946c956a3a32a743e9b7b218526797e4d3..baab63d31dddc8d42f96c41d9ea152550a16020c 100644 (file)
@@ -159,7 +159,7 @@ struct amdtp_stream {
                        struct {
                                struct seq_desc *descs;
                                unsigned int size;
-                               unsigned int tail;
+                               unsigned int pos;
                                unsigned int head;
                        } seq;