Commit 6f24bb8a authored by Takashi Sakamoto, committed by Takashi Iwai

ALSA: firewire-lib: pool sequence of packet in IT context independently

The current implementation pools the sequence in the AMDTP domain. This is
convenient in terms of memory usage and computation time, but inconvenient
for devices in which several rx streams are expected to transfer timing
information independently.

This commit refactors the code to pool the sequence per rx packet stream.
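
The shape of the change can be sketched in isolation: each rx stream now owns
its own ring of sequence descriptors, with pool_ideal_seq_descs() filling
entries at the tail and generate_pkt_descs() consuming them from the head, so
streams no longer share the domain-wide pool. Below is a minimal user-space C
sketch of that ring under those assumptions; seq_entry, seq_ring, and compute
are illustrative stand-ins, not kernel identifiers.

#include <stdbool.h>
#include <stdlib.h>

// Hypothetical user-space model of the per-stream descriptor ring that this
// commit introduces; not the kernel code itself.
struct seq_entry {
        unsigned int syt_offset;
        unsigned int data_blocks;
};

struct seq_ring {
        struct seq_entry *descs;
        unsigned int size;
        unsigned int tail;      // producer: where pooling writes next
        unsigned int head;      // consumer: where packet generation reads next
};

static bool seq_ring_init(struct seq_ring *r, unsigned int queue_size)
{
        // Mirrors the per-stream kcalloc() done for rx streams on start.
        r->descs = calloc(queue_size, sizeof(*r->descs));
        if (r->descs == NULL)
                return false;
        r->size = queue_size;
        r->tail = 0;
        r->head = 0;
        return true;
}

// Pool one entry per isochronous packet, advancing only this stream's tail;
// compute() stands in for calculate_syt_offset()/calculate_data_blocks().
static void seq_ring_pool(struct seq_ring *r, unsigned int count,
                          struct seq_entry (*compute)(void))
{
        for (unsigned int i = 0; i < count; ++i) {
                r->descs[r->tail] = compute();
                r->tail = (r->tail + 1) % r->size;
        }
}

// Consume one pooled entry when building a packet descriptor.
static struct seq_entry seq_ring_consume(struct seq_ring *r)
{
        struct seq_entry e = r->descs[r->head];
        r->head = (r->head + 1) % r->size;
        return e;
}

Because both indices belong to a single stream, each IT context can advance
its timing sequence on its own, which is what devices with independently
clocked rx streams require.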
Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
Link: https://lore.kernel.org/r/20210522013303.49596-3-o-takashi@sakamocchi.jp
Signed-off-by: Takashi Iwai <tiwai@suse.de>
parent 8070d265
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -441,6 +441,30 @@ static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
 	return syt_offset;
 }
 
+static void pool_ideal_seq_descs(struct amdtp_stream *s, unsigned int count)
+{
+	unsigned int seq_tail = s->ctx_data.rx.seq.tail;
+	const unsigned int seq_size = s->ctx_data.rx.seq.size;
+	const unsigned int syt_interval = s->syt_interval;
+	const enum cip_sfc sfc = s->sfc;
+	const bool is_blocking = !!(s->flags & CIP_BLOCKING);
+	int i;
+
+	for (i = 0; i < count; ++i) {
+		struct seq_desc *desc = s->ctx_data.rx.seq.descs + seq_tail;
+
+		desc->syt_offset = calculate_syt_offset(&s->ctx_data.rx.last_syt_offset,
+						&s->ctx_data.rx.syt_offset_state, sfc);
+		desc->data_blocks = calculate_data_blocks(&s->ctx_data.rx.data_block_state,
+						is_blocking, desc->syt_offset == CIP_SYT_NO_INFO,
+						syt_interval, sfc);
+
+		seq_tail = (seq_tail + 1) % seq_size;
+	}
+
+	s->ctx_data.rx.seq.tail = seq_tail;
+}
+
 static void update_pcm_pointers(struct amdtp_stream *s,
 				struct snd_pcm_substream *pcm,
 				unsigned int frames)
@@ -833,14 +857,14 @@ static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
 			       unsigned int seq_size)
 {
 	unsigned int dbc = s->data_block_counter;
-	unsigned int seq_index = s->ctx_data.rx.seq_index;
+	unsigned int seq_head = s->ctx_data.rx.seq.head;
 	bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
 	int i;
 
 	for (i = 0; i < packets; ++i) {
 		struct pkt_desc *desc = descs + i;
 		unsigned int index = (s->packet_index + i) % s->queue_size;
-		const struct seq_desc *seq = seq_descs + seq_index;
+		const struct seq_desc *seq = seq_descs + seq_head;
 
 		desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);
@@ -863,13 +887,13 @@ static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
 		desc->ctx_payload = s->buffer.packets[index].buffer;
 
-		seq_index = (seq_index + 1) % seq_size;
+		seq_head = (seq_head + 1) % seq_size;
 
 		++ctx_header;
 	}
 
 	s->data_block_counter = dbc;
-	s->ctx_data.rx.seq_index = seq_index;
+	s->ctx_data.rx.seq.head = seq_head;
 }
 
 static inline void cancel_stream(struct amdtp_stream *s)
@@ -911,8 +935,10 @@ static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
 	// Calculate the number of packets in buffer and check XRUN.
 	packets = header_length / sizeof(*ctx_header);
 
-	generate_pkt_descs(s, s->pkt_descs, ctx_header, packets, d->seq.descs,
-			   d->seq.size);
+	pool_ideal_seq_descs(s, packets);
+
+	generate_pkt_descs(s, s->pkt_descs, ctx_header, packets, s->ctx_data.rx.seq.descs,
+			   s->ctx_data.rx.seq.size);
 
 	process_ctx_payloads(s, s->pkt_descs, packets);
@@ -1152,51 +1178,6 @@ static void process_tx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
 	}
 }
 
-static void pool_ideal_seq_descs(struct amdtp_domain *d, unsigned int packets)
-{
-	struct amdtp_stream *irq_target = d->irq_target;
-	unsigned int seq_tail = d->seq.tail;
-	unsigned int seq_size = d->seq.size;
-	unsigned int min_avail;
-	struct amdtp_stream *s;
-
-	min_avail = d->seq.size;
-	list_for_each_entry(s, &d->streams, list) {
-		unsigned int seq_index;
-		unsigned int avail;
-
-		if (s->direction == AMDTP_IN_STREAM)
-			continue;
-
-		seq_index = s->ctx_data.rx.seq_index;
-		avail = d->seq.tail;
-		if (seq_index > avail)
-			avail += d->seq.size;
-		avail -= seq_index;
-
-		if (avail < min_avail)
-			min_avail = avail;
-	}
-
-	while (min_avail < packets) {
-		struct seq_desc *desc = d->seq.descs + seq_tail;
-
-		desc->syt_offset = calculate_syt_offset(&d->last_syt_offset,
-							&d->syt_offset_state, irq_target->sfc);
-		desc->data_blocks = calculate_data_blocks(&d->data_block_state,
-				!!(irq_target->flags & CIP_BLOCKING),
-				desc->syt_offset == CIP_SYT_NO_INFO,
-				irq_target->syt_interval, irq_target->sfc);
-
-		++seq_tail;
-		seq_tail %= seq_size;
-
-		++min_avail;
-	}
-
-	d->seq.tail = seq_tail;
-}
-
 static void process_ctxs_in_domain(struct amdtp_domain *d)
 {
 	struct amdtp_stream *s;
@@ -1225,9 +1206,6 @@ static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
 {
 	struct amdtp_stream *s = private_data;
 	struct amdtp_domain *d = s->domain;
-	unsigned int packets = header_length / sizeof(__be32);
-
-	pool_ideal_seq_descs(d, packets);
 
 	process_rx_packets(context, tstamp, header_length, header, private_data);
 	process_ctxs_in_domain(d);
@@ -1238,9 +1216,6 @@ static void irq_target_callback_intermediately(struct fw_iso_context *context, u32 tstamp,
 {
 	struct amdtp_stream *s = private_data;
 	struct amdtp_domain *d = s->domain;
-	unsigned int packets = header_length / sizeof(__be32);
-
-	pool_ideal_seq_descs(d, packets);
 
 	process_rx_packets_intermediately(context, tstamp, header_length, header, private_data);
 	process_ctxs_in_domain(d);
@@ -1415,7 +1390,31 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
 		s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
 		s->ctx_data.tx.ctx_header_size = ctx_header_size;
 	} else {
-		s->ctx_data.rx.seq_index = 0;
+		static const struct {
+			unsigned int data_block;
+			unsigned int syt_offset;
+		} *entry, initial_state[] = {
+			[CIP_SFC_32000]  = {  4, 3072 },
+			[CIP_SFC_48000]  = {  6, 1024 },
+			[CIP_SFC_96000]  = { 12, 1024 },
+			[CIP_SFC_192000] = { 24, 1024 },
+			[CIP_SFC_44100]  = {  0,   67 },
+			[CIP_SFC_88200]  = {  0,   67 },
+			[CIP_SFC_176400] = {  0,   67 },
+		};
+
+		s->ctx_data.rx.seq.descs = kcalloc(queue_size, sizeof(*s->ctx_data.rx.seq.descs), GFP_KERNEL);
+		if (!s->ctx_data.rx.seq.descs)
+			goto err_context;
+		s->ctx_data.rx.seq.size = queue_size;
+		s->ctx_data.rx.seq.tail = 0;
+		s->ctx_data.rx.seq.head = 0;
+
+		entry = &initial_state[s->sfc];
+		s->ctx_data.rx.data_block_state = entry->data_block;
+		s->ctx_data.rx.syt_offset_state = entry->syt_offset;
+		s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;
+
 		s->ctx_data.rx.event_count = 0;
 	}
@@ -1471,6 +1470,8 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
 err_pkt_descs:
 	kfree(s->pkt_descs);
 err_context:
+	if (s->direction == AMDTP_OUT_STREAM)
+		kfree(s->ctx_data.rx.seq.descs);
 	fw_iso_context_destroy(s->context);
 	s->context = ERR_PTR(-1);
 err_buffer:
@@ -1581,7 +1582,8 @@ static void amdtp_stream_stop(struct amdtp_stream *s)
 	iso_packets_buffer_destroy(&s->buffer, s->unit);
 	kfree(s->pkt_descs);
 
-	s->callbacked = false;
+	if (s->direction == AMDTP_OUT_STREAM)
+		kfree(s->ctx_data.rx.seq.descs);
 
 	mutex_unlock(&s->mutex);
 }
@@ -1613,8 +1615,6 @@ int amdtp_domain_init(struct amdtp_domain *d)
 	d->events_per_period = 0;
 
-	d->seq.descs = NULL;
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(amdtp_domain_init);
@@ -1665,18 +1665,6 @@ EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);
  */
 int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles)
 {
-	static const struct {
-		unsigned int data_block;
-		unsigned int syt_offset;
-	} *entry, initial_state[] = {
-		[CIP_SFC_32000]  = {  4, 3072 },
-		[CIP_SFC_48000]  = {  6, 1024 },
-		[CIP_SFC_96000]  = { 12, 1024 },
-		[CIP_SFC_192000] = { 24, 1024 },
-		[CIP_SFC_44100]  = {  0,   67 },
-		[CIP_SFC_88200]  = {  0,   67 },
-		[CIP_SFC_176400] = {  0,   67 },
-	};
 	unsigned int events_per_buffer = d->events_per_buffer;
 	unsigned int events_per_period = d->events_per_period;
 	unsigned int queue_size;
@@ -1705,17 +1693,6 @@ int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles)
 	queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
 				  amdtp_rate_table[d->irq_target->sfc]);
 
-	d->seq.descs = kcalloc(queue_size, sizeof(*d->seq.descs), GFP_KERNEL);
-	if (!d->seq.descs)
-		return -ENOMEM;
-	d->seq.size = queue_size;
-	d->seq.tail = 0;
-
-	entry = &initial_state[s->sfc];
-	d->data_block_state = entry->data_block;
-	d->syt_offset_state = entry->syt_offset;
-	d->last_syt_offset = TICKS_PER_CYCLE;
-
 	list_for_each_entry(s, &d->streams, list) {
 		unsigned int idle_irq_interval = 0;
@@ -1734,8 +1711,6 @@ int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles)
 error:
 	list_for_each_entry(s, &d->streams, list)
 		amdtp_stream_stop(s);
-	kfree(d->seq.descs);
-	d->seq.descs = NULL;
 	return err;
 }
 EXPORT_SYMBOL_GPL(amdtp_domain_start);
@@ -1760,8 +1735,5 @@ void amdtp_domain_stop(struct amdtp_domain *d)
 	d->events_per_period = 0;
 	d->irq_target = NULL;
-
-	kfree(d->seq.descs);
-	d->seq.descs = NULL;
 }
 EXPORT_SYMBOL_GPL(amdtp_domain_stop);
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -142,13 +142,23 @@ struct amdtp_stream {
 		struct {
 			// To calculate CIP data blocks and tstamp.
 			unsigned int transfer_delay;
-			unsigned int seq_index;
 
 			// To generate CIP header.
 			unsigned int fdf;
 
 			// To generate constant hardware IRQ.
 			unsigned int event_count;
+
+			struct {
+				struct seq_desc *descs;
+				unsigned int size;
+				unsigned int tail;
+				unsigned int head;
+			} seq;
+
+			unsigned int data_block_state;
+			unsigned int syt_offset_state;
+			unsigned int last_syt_offset;
 		} rx;
 	} ctx_data;
@@ -281,16 +291,6 @@ struct amdtp_domain {
 		unsigned int tx_start;
 		unsigned int rx_start;
 	} processing_cycle;
-
-	struct {
-		struct seq_desc *descs;
-		unsigned int size;
-		unsigned int tail;
-	} seq;
-
-	unsigned int data_block_state;
-	unsigned int syt_offset_state;
-	unsigned int last_syt_offset;
 };
 
 int amdtp_domain_init(struct amdtp_domain *d);