// SPDX-License-Identifier: GPL-2.0-only
/*
 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
 * with Common Isochronous Packet (IEC 61883-1) headers
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "amdtp-stream.h"
#define TICKS_PER_CYCLE		3072
#define CYCLES_PER_SECOND	8000
#define TICKS_PER_SECOND	(TICKS_PER_CYCLE * CYCLES_PER_SECOND)

#define OHCI_SECOND_MODULUS	8

/* Always support Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
#include "amdtp-stream-trace.h"

#define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 microseconds */

/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT	16
#define TAG_NO_CIP_HEADER	0
#define TAG_CIP			1
// Common Isochronous Packet (CIP) header parameters. Use a two-quadlet CIP header when supported.
#define CIP_HEADER_QUADLETS	2
#define CIP_EOH_SHIFT		31
#define CIP_EOH			(1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK		0x80000000
#define CIP_SID_SHIFT		24
#define CIP_SID_MASK		0x3f000000
#define CIP_DBS_MASK		0x00ff0000
#define CIP_DBS_SHIFT		16
#define CIP_SPH_MASK		0x00000400
#define CIP_SPH_SHIFT		10
#define CIP_DBC_MASK		0x000000ff
#define CIP_FMT_SHIFT		24
#define CIP_FMT_MASK		0x3f000000
#define CIP_FDF_MASK		0x00ff0000
#define CIP_FDF_SHIFT		16
#define CIP_FDF_NO_DATA		0xff
#define CIP_SYT_MASK		0x0000ffff
#define CIP_SYT_NO_INFO		0xffff
#define CIP_SYT_CYCLE_MODULUS	16
#define CIP_NO_DATA		((CIP_FDF_NO_DATA << CIP_FDF_SHIFT) | CIP_SYT_NO_INFO)

#define CIP_HEADER_SIZE		(sizeof(__be32) * CIP_HEADER_QUADLETS)

/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM		0x10
#define AMDTP_FDF_NO_DATA	0xff
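
/*
 * As an illustration of the bit layout above (values invented here, not taken
 * from any particular device): a first CIP quadlet of 0x01020034 carries
 * SID 0x01, DBS 0x02 (two quadlets per data block), SPH 0 and DBC 0x34, while
 * a second quadlet of 0x90021234 carries EOH, FMT 0x10 (Audio and Music),
 * FDF 0x02 and SYT 0x1234.
 */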
// For iso header and tstamp.
#define IR_CTX_HEADER_DEFAULT_QUADLETS	2
// Add nothing.
#define IR_CTX_HEADER_SIZE_NO_CIP	(sizeof(__be32) * IR_CTX_HEADER_DEFAULT_QUADLETS)
// Add a two-quadlet CIP header.
#define IR_CTX_HEADER_SIZE_CIP		(IR_CTX_HEADER_SIZE_NO_CIP + CIP_HEADER_SIZE)
#define HEADER_TSTAMP_MASK	0x0000ffff

#define IT_PKT_HEADER_SIZE_CIP		CIP_HEADER_SIZE
#define IT_PKT_HEADER_SIZE_NO_CIP	0	// Nothing.

// The initial firmware of OXFW970 can postpone packet transmission while finishing an
// asynchronous transaction. This module accepts up to 5 skipped cycles to avoid buffer
// overruns. If the device actually skips more cycles, this module stops the packet streaming.
#define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES	5

static void pcm_period_work(struct work_struct *work);
/**
 * amdtp_stream_init - initialize an AMDTP stream structure
 * @s: the AMDTP stream to initialize
 * @unit: the target of the stream
 * @dir: the direction of stream
 * @flags: the details of the streaming protocol, as a bitwise OR of cip_flags enumeration constants
 * @fmt: the value of fmt field in CIP header
 * @process_ctx_payloads: callback handler to process payloads of isoc context
 * @protocol_size: the size to allocate newly for protocol
 */
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
		      enum amdtp_stream_direction dir, unsigned int flags,
		      unsigned int fmt,
		      amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
		      unsigned int protocol_size)
{
	if (process_ctx_payloads == NULL)
		return -EINVAL;

	s->protocol = kzalloc(protocol_size, GFP_KERNEL);
	if (!s->protocol)
		return -ENOMEM;

	s->unit = unit;
	s->direction = dir;
	s->flags = flags;
	s->context = ERR_PTR(-1);
	mutex_init(&s->mutex);
	INIT_WORK(&s->period_work, pcm_period_work);
	s->packet_index = 0;

	init_waitqueue_head(&s->ready_wait);

	s->fmt = fmt;
	s->process_ctx_payloads = process_ctx_payloads;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);
/**
 * amdtp_stream_destroy - free stream resources
 * @s: the AMDTP stream to destroy
 */
void amdtp_stream_destroy(struct amdtp_stream *s)
{
	/* Not initialized. */
	if (s->protocol == NULL)
		return;

	WARN_ON(amdtp_stream_running(s));
	kfree(s->protocol);
	mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);
const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  8,
	[CIP_SFC_44100]  =  8,
	[CIP_SFC_48000]  =  8,
	[CIP_SFC_88200]  = 16,
	[CIP_SFC_96000]  = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);
const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  = 32000,
	[CIP_SFC_44100]  = 44100,
	[CIP_SFC_48000]  = 48000,
	[CIP_SFC_88200]  = 88200,
	[CIP_SFC_96000]  = 96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);
static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
				    struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *s = hw_param_interval(params, rule->var);
	const struct snd_interval *r =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t = {0};
	unsigned int step = 0;
	int i;

	for (i = 0; i < CIP_SFC_COUNT; ++i) {
		if (snd_interval_test(r, amdtp_rate_table[i]))
			step = max(step, amdtp_syt_intervals[i]);
	}

	t.min = roundup(s->min, step);
	t.max = rounddown(s->max, step);
	t.integer = 1;

	return snd_interval_refine(s, &t);
}
/**
 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
 * @s: the AMDTP stream, which must be initialized.
 * @runtime: the PCM substream runtime
 */
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
					struct snd_pcm_runtime *runtime)
{
	struct snd_pcm_hardware *hw = &runtime->hw;
	unsigned int ctx_header_size;
	unsigned int maximum_usec_per_period;
	int err;

	hw->info = SNDRV_PCM_INFO_BATCH |
		   SNDRV_PCM_INFO_BLOCK_TRANSFER |
		   SNDRV_PCM_INFO_INTERLEAVED |
		   SNDRV_PCM_INFO_JOINT_DUPLEX |
		   SNDRV_PCM_INFO_MMAP |
		   SNDRV_PCM_INFO_MMAP_VALID;

	/* SNDRV_PCM_INFO_BATCH */
	hw->periods_min = 2;
	hw->periods_max = UINT_MAX;

	/* bytes for a frame */
	hw->period_bytes_min = 4 * hw->channels_max;

	/* Just to avoid allocating too many pages. */
	hw->period_bytes_max = hw->period_bytes_min * 2048;
	hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;

	// The Linux driver for the 1394 OHCI controller voluntarily flushes an isoc
	// context when the total size of accumulated context headers reaches
	// PAGE_SIZE. This kicks work for the isoc context and brings a
	// callback in the middle of scheduled interrupts.
	// Although AMDTP streams in the same domain use the same events per
	// IRQ, use the largest size of context header between IT/IR contexts.
	// Here, the size of the context header in the IR context is used for both
	// contexts.
	if (!(s->flags & CIP_NO_HEADER))
		ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
	else
		ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
				  CYCLES_PER_SECOND / ctx_header_size;
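
	// A minimal worked example, assuming the common 4 KiB PAGE_SIZE: with a
	// CIP header the context header is 16 bytes, so one page holds
	// 4096 / 16 = 256 headers, and at 8000 cycles per second the controller
	// flushes after at most 256 / 8000 sec = 32000 usec.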

	// In IEC 61883-6, one isoc packet can transfer events up to the value
	// of the syt interval. This comes from the interval of the isoc cycle. As the
	// 1394 OHCI controller can generate a hardware IRQ per isoc packet, the
	// interval is 125 usec.
	// However, there are two ways of transmission in IEC 61883-6; blocking
	// and non-blocking modes. In blocking mode, the sequence of isoc packets
	// includes 'empty' or 'NODATA' packets which include no events. In
	// non-blocking mode, the number of events per packet is variable up to
	// the syt interval.
	// Due to the above protocol design, the minimum PCM frames per
	// interrupt should be double the value of the syt interval, thus it is
	// 250 usec.
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   250, maximum_usec_per_period);
	if (err < 0)
		return err;
	/* Non-blocking stream has no more constraints */
	if (!(s->flags & CIP_BLOCKING))
		return 0;

	/*
	 * One AMDTP packet can include some frames. In blocking mode, the
	 * number equals SYT_INTERVAL. So the number is 8, 16 or 32,
	 * depending on its sampling rate. For an accurate period interrupt, it's
	 * preferable to align period/buffer sizes to the current SYT_INTERVAL.
	 */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		return err;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);
/**
 * amdtp_stream_set_parameters - set stream parameters
 * @s: the AMDTP stream to configure
 * @rate: the sample rate
 * @data_block_quadlets: the size of a data block in quadlet unit
 *
 * The parameters must be set before the stream is started, and must not be
 * changed while the stream is running.
 */
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
				unsigned int data_block_quadlets)
{
	unsigned int sfc;

	for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
		if (amdtp_rate_table[sfc] == rate)
			break;
	}
	if (sfc == ARRAY_SIZE(amdtp_rate_table))
		return -EINVAL;

	s->sfc = sfc;
	s->data_block_quadlets = data_block_quadlets;
	s->syt_interval = amdtp_syt_intervals[sfc];

	// default buffering in the device.
	s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;

	// additional buffering needed to adjust for no-data packets.
	if (s->flags & CIP_BLOCKING)
		s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;
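	// A worked example of the adjustment above (the rate and interval are
	// illustrative, not mandated anywhere): at 48000 Hz with a syt interval
	// of 8, this adds 24576000 * 8 / 48000 = 4096 ticks, i.e. one packet's
	// worth of events expressed in 24.576 MHz ticks.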

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);
// The CIP header is processed in the context header, apart from the context payload.
static int amdtp_stream_get_max_ctx_payload_size(struct amdtp_stream *s)
{
	unsigned int multiplier;

	if (s->flags & CIP_JUMBO_PAYLOAD)
		multiplier = IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES;
	else
		multiplier = 1;

	return s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
}
/**
 * amdtp_stream_get_max_payload - get the stream's packet size
 * @s: the AMDTP stream
 *
 * This function must not be called before the stream has been configured
 * with amdtp_stream_set_parameters().
 */
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
	unsigned int cip_header_size;

	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = CIP_HEADER_SIZE;
	else
		cip_header_size = 0;

	return cip_header_size + amdtp_stream_get_max_ctx_payload_size(s);
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);
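
/*
 * A hypothetical example of the arithmetic above: with a syt interval of 8,
 * 10 quadlets per data block and no CIP_JUMBO_PAYLOAD, the maximum context
 * payload is 8 * 10 * 4 = 320 bytes, so amdtp_stream_get_max_payload()
 * reports 320 + 8 (CIP header) = 328 bytes when CIP_NO_HEADER is not set.
 */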
/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	cancel_work_sync(&s->period_work);
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
static void pool_blocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
				      const unsigned int seq_size, unsigned int seq_tail,
				      unsigned int count)
{
	const unsigned int syt_interval = s->syt_interval;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + seq_tail;

		if (desc->syt_offset != CIP_SYT_NO_INFO)
			desc->data_blocks = syt_interval;
		else
			desc->data_blocks = 0;

		seq_tail = (seq_tail + 1) % seq_size;
	}
}
static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
					       const unsigned int seq_size, unsigned int seq_tail,
					       unsigned int count)
{
	const enum cip_sfc sfc = s->sfc;
	unsigned int state = s->ctx_data.rx.data_block_state;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + seq_tail;

		if (!cip_sfc_is_base_44100(sfc)) {
			// Sample_rate / 8000 is an integer, and precomputed.
			desc->data_blocks = state;
		} else {
			unsigned int phase = state;

			/*
			 * This calculates the number of data blocks per packet so that
			 * 1) the overall rate is correct and exactly synchronized to
			 *    the bus clock, and
			 * 2) packets with a rounded-up number of blocks occur as early
			 *    as possible in the sequence (to prevent underruns of the
			 *    device's buffer).
			 */
			if (sfc == CIP_SFC_44100)
				/* 6 6 5 6 5 6 5 ... */
				desc->data_blocks = 5 + ((phase & 1) ^ (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				desc->data_blocks = 11 * (sfc >> 1) + (phase == 0);
			if (++phase >= (80 >> (sfc >> 1)))
				phase = 0;
			state = phase;
		}

		seq_tail = (seq_tail + 1) % seq_size;
	}

	s->ctx_data.rx.data_block_state = state;
}
static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
					 unsigned int *syt_offset_state, enum cip_sfc sfc)
{
	unsigned int syt_offset;

	if (*last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(sfc))
			syt_offset = *last_syt_offset + *syt_offset_state;
		else {
			/*
			 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
			 *   n * SYT_INTERVAL * 24576000 / sample_rate
			 * Modulo TICKS_PER_CYCLE, the difference between successive
			 * elements is about 1386.23. Rounding the results of this
			 * formula to the SYT precision results in a sequence of
			 * differences that begins with:
			 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
			 * This code generates _exactly_ the same sequence.
			 */
			unsigned int phase = *syt_offset_state;
			unsigned int index = phase % 13;

			syt_offset = *last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			*syt_offset_state = phase;
		}
	} else
		syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
	*last_syt_offset = syt_offset;

	if (syt_offset >= TICKS_PER_CYCLE)
		syt_offset = CIP_SYT_NO_INFO;

	return syt_offset;
}
static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *descs,
				   const unsigned int seq_size, unsigned int seq_tail,
				   unsigned int count)
{
	const enum cip_sfc sfc = s->sfc;
	unsigned int last = s->ctx_data.rx.last_syt_offset;
	unsigned int state = s->ctx_data.rx.syt_offset_state;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + seq_tail;

		desc->syt_offset = calculate_syt_offset(&last, &state, sfc);

		seq_tail = (seq_tail + 1) % seq_size;
	}

	s->ctx_data.rx.last_syt_offset = last;
	s->ctx_data.rx.syt_offset_state = state;
}
static unsigned int compute_syt_offset(unsigned int syt, unsigned int cycle,
				       unsigned int transfer_delay)
{
	unsigned int cycle_lo = (cycle % CYCLES_PER_SECOND) & 0x0f;
	unsigned int syt_cycle_lo = (syt & 0xf000) >> 12;
	unsigned int syt_offset;

	// Round up.
	if (syt_cycle_lo < cycle_lo)
		syt_cycle_lo += CIP_SYT_CYCLE_MODULUS;
	syt_cycle_lo -= cycle_lo;

	// Subtract the transfer delay so that the synchronization offset is not so large
	// at transmission.
	syt_offset = syt_cycle_lo * TICKS_PER_CYCLE + (syt & 0x0fff);
	if (syt_offset < transfer_delay)
		syt_offset += CIP_SYT_CYCLE_MODULUS * TICKS_PER_CYCLE;

	return syt_offset - transfer_delay;
}
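
// A worked example with invented numbers: for syt = 0x3456 received in a cycle
// with cycle_lo = 2, syt_cycle_lo becomes 3 - 2 = 1, so the raw offset is
// 1 * 3072 + 0x456 = 4182 ticks. With the default non-blocking transfer delay
// of 0x2e00 - 3072 = 8704 ticks, 4182 < 8704, so 16 * 3072 is added first and
// the result is 4182 + 49152 - 8704 = 44630 ticks.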
static void cache_seq(struct amdtp_stream *s, const struct pkt_desc *descs, unsigned int desc_count)
{
	const unsigned int transfer_delay = s->transfer_delay;
	const unsigned int cache_size = s->ctx_data.tx.cache.size;
	struct seq_desc *cache = s->ctx_data.tx.cache.descs;
	unsigned int cache_tail = s->ctx_data.tx.cache.tail;
	bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
	int i;

	for (i = 0; i < desc_count; ++i) {
		struct seq_desc *dst = cache + cache_tail;
		const struct pkt_desc *src = descs + i;

		if (aware_syt && src->syt != CIP_SYT_NO_INFO)
			dst->syt_offset = compute_syt_offset(src->syt, src->cycle, transfer_delay);
		else
			dst->syt_offset = CIP_SYT_NO_INFO;
		dst->data_blocks = src->data_blocks;

		cache_tail = (cache_tail + 1) % cache_size;
	}

	s->ctx_data.tx.cache.tail = cache_tail;
}
static void pool_ideal_seq_descs(struct amdtp_stream *s, unsigned int count)
{
	struct seq_desc *descs = s->ctx_data.rx.seq.descs;
	unsigned int seq_tail = s->ctx_data.rx.seq.tail;
	const unsigned int seq_size = s->ctx_data.rx.seq.size;

	pool_ideal_syt_offsets(s, descs, seq_size, seq_tail, count);

	if (s->flags & CIP_BLOCKING)
		pool_blocking_data_blocks(s, descs, seq_size, seq_tail, count);
	else
		pool_ideal_nonblocking_data_blocks(s, descs, seq_size, seq_tail, count);

	s->ctx_data.rx.seq.tail = (seq_tail + count) % seq_size;
}
static void update_pcm_pointers(struct amdtp_stream *s,
				struct snd_pcm_substream *pcm,
				unsigned int frames)
{
	unsigned int ptr;

	ptr = s->pcm_buffer_pointer + frames;
	if (ptr >= pcm->runtime->buffer_size)
		ptr -= pcm->runtime->buffer_size;
	WRITE_ONCE(s->pcm_buffer_pointer, ptr);

	s->pcm_period_pointer += frames;
	if (s->pcm_period_pointer >= pcm->runtime->period_size) {
		s->pcm_period_pointer -= pcm->runtime->period_size;
		queue_work(system_highpri_wq, &s->period_work);
	}
}
static void pcm_period_work(struct work_struct *work)
{
	struct amdtp_stream *s = container_of(work, struct amdtp_stream,
					      period_work);
	struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);

	if (pcm)
		snd_pcm_period_elapsed(pcm);
}
static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
			bool sched_irq)
{
	int err;

	params->interrupt = sched_irq;
	params->tag = s->tag;
	params->sy = 0;

	err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
				   s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	if (++s->packet_index >= s->queue_size)
		s->packet_index = 0;
end:
	return err;
}
static inline int queue_out_packet(struct amdtp_stream *s,
				   struct fw_iso_packet *params, bool sched_irq)
{
	params->skip =
		!!(params->header_length == 0 && params->payload_length == 0);
	return queue_packet(s, params, sched_irq);
}
static inline int queue_in_packet(struct amdtp_stream *s,
				  struct fw_iso_packet *params)
{
	// Queue one packet for IR context.
	params->header_length = s->ctx_data.tx.ctx_header_size;
	params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
	params->skip = false;
	return queue_packet(s, params, false);
}
static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
				unsigned int data_block_counter, unsigned int syt)
{
	cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
				(s->data_block_quadlets << CIP_DBS_SHIFT) |
				((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
				data_block_counter);
	cip_header[1] = cpu_to_be32(CIP_EOH |
			((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
			((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
			(syt & CIP_SYT_MASK));
}
static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
				struct fw_iso_packet *params, unsigned int header_length,
				unsigned int data_blocks,
				unsigned int data_block_counter,
				unsigned int syt, unsigned int index)
{
	unsigned int payload_length;
	__be32 *cip_header;

	payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
	params->payload_length = payload_length;

	if (header_length > 0) {
		cip_header = (__be32 *)params->header;
		generate_cip_header(s, cip_header, data_block_counter, syt);
		params->header_length = header_length;
	} else {
		cip_header = NULL;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length + header_length, data_blocks,
			   data_block_counter, s->packet_index, index);
}
static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
			    unsigned int payload_length,
			    unsigned int *data_blocks,
			    unsigned int *data_block_counter, unsigned int *syt)
{
	u32 cip_header[2];
	unsigned int sph;
	unsigned int fmt;
	unsigned int fdf;
	unsigned int dbc;
	bool lost;

	cip_header[0] = be32_to_cpu(buf[0]);
	cip_header[1] = be32_to_cpu(buf[1]);

	/*
	 * This module supports 'Two-quadlet CIP header with SYT field'.
	 * For convenience, also check whether the FMT field is AM824 or not.
	 */
	if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	     ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
	    (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
		dev_info_ratelimited(&s->unit->device,
				     "Invalid CIP header for AMDTP: %08X:%08X\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Check valid protocol or not. */
	sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (sph != s->sph || fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Calculate data blocks */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_length == 0 || (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		*data_blocks = 0;
	} else {
		unsigned int data_block_quadlets =
				(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* avoid division by zero */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

		*data_blocks = payload_length / sizeof(__be32) / data_block_quadlets;
	}

	/* Check data block counter continuity */
	dbc = cip_header[0] & CIP_DBC_MASK;
	if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    *data_block_counter != UINT_MAX)
		dbc = *data_block_counter;

	if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
	    *data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		lost = dbc != *data_block_counter;
	} else {
		unsigned int dbc_interval;

		if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
			dbc_interval = s->ctx_data.tx.dbc_interval;
		else
			dbc_interval = *data_blocks;

		lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			*data_block_counter, dbc);
		return -EIO;
	}

	*data_block_counter = dbc;

	if (!(s->flags & CIP_UNAWARE_SYT))
		*syt = cip_header[1] & CIP_SYT_MASK;

	return 0;
}
static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
			       const __be32 *ctx_header,
			       unsigned int *data_blocks,
			       unsigned int *data_block_counter,
			       unsigned int *syt, unsigned int packet_index, unsigned int index)
{
	unsigned int payload_length;
	const __be32 *cip_header;
	unsigned int cip_header_size;

	payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;

	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = CIP_HEADER_SIZE;
	else
		cip_header_size = 0;

	if (payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
		dev_err(&s->unit->device,
			"Detect jumbo payload: %04x %04x\n",
			payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
		return -EIO;
	}

	if (cip_header_size > 0) {
		if (payload_length >= cip_header_size) {
			int err;

			cip_header = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
			err = check_cip_header(s, cip_header, payload_length - cip_header_size,
					       data_blocks, data_block_counter, syt);
			if (err < 0)
				return err;
		} else {
			// Handle the cycle as if an empty packet arrived.
			cip_header = NULL;
			*data_blocks = 0;
			*syt = 0;
		}
	} else {
		cip_header = NULL;
		*data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets;
		*syt = 0;

		if (*data_block_counter == UINT_MAX)
			*data_block_counter = 0;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks,
			   *data_block_counter, packet_index, index);

	return 0;
}
// In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the second. On
// the other hand, in DMA descriptors of 1394 OHCI, 3 bits are used to represent
// it. Thus, via the Linux firewire subsystem, we can get the 3 bits for the second.
static inline u32 compute_ohci_cycle_count(__be32 ctx_header_tstamp)
{
	u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
	return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
}
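
// For instance, a timestamp field of 0x4321 decodes as second bits
// (0x4321 >> 13) & 0x07 = 2 and cycle bits 0x4321 & 0x1fff = 801, giving
// 2 * 8000 + 801 = 16801 as the cycle count modulo 8 seconds.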
static inline u32 increment_ohci_cycle_count(u32 cycle, unsigned int addend)
{
	cycle += addend;
	if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND)
		cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
	return cycle;
}
static int compare_ohci_cycle_count(u32 lval, u32 rval)
{
	if (lval == rval)
		return 0;
	else if (lval < rval && rval - lval < OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2)
		return -1;
	else
		return 1;
}
// Align to the actual cycle count for the packet which is going to be scheduled.
// This module queues the same number of isochronous cycles as the queue size to
// skip isochronous cycles, therefore it is OK to just increment the cycle by
// the queue size for the scheduled cycle.
static inline u32 compute_ohci_it_cycle(const __be32 ctx_header_tstamp,
					unsigned int queue_size)
{
	u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp);
	return increment_ohci_cycle_count(cycle, queue_size);
}
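
// For example, with a queue of 48 packets (the queue length is illustrative),
// a completion for the packet stamped at cycle 16801 implies that the packet
// queued now is scheduled for cycle 16801 + 48 = 16849.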
static int generate_device_pkt_descs(struct amdtp_stream *s,
				     struct pkt_desc *descs,
				     const __be32 *ctx_header,
				     unsigned int packets,
				     unsigned int *desc_count)
{
	unsigned int next_cycle = s->next_cycle;
	unsigned int dbc = s->data_block_counter;
	unsigned int packet_index = s->packet_index;
	unsigned int queue_size = s->queue_size;
	int i;
	int err;

	*desc_count = 0;
	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + *desc_count;
		unsigned int cycle;
		bool lost;
		unsigned int data_blocks;
		unsigned int syt;

		cycle = compute_ohci_cycle_count(ctx_header[1]);
		lost = (next_cycle != cycle);
		if (lost) {
			if (s->flags & CIP_NO_HEADER) {
				// Fireface skips transmission just for an isoc cycle corresponding
				// to an empty packet.
				unsigned int prev_cycle = next_cycle;

				next_cycle = increment_ohci_cycle_count(next_cycle, 1);
				lost = (next_cycle != cycle);
				if (!lost) {
					// Prepare a description of the skipped cycle for
					// sequence replay.
					desc->cycle = prev_cycle;
					desc->syt = 0;
					desc->data_blocks = 0;
					desc->data_block_counter = dbc;
					desc->ctx_payload = NULL;
					++desc;
					++(*desc_count);
				}
			} else if (s->flags & CIP_JUMBO_PAYLOAD) {
				// OXFW970 skips transmission for several isoc cycles during
				// asynchronous transaction. The sequence replay is impossible
				// for this reason.
				unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
								IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
				lost = (compare_ohci_cycle_count(safe_cycle, cycle) > 0);
			}
			if (lost) {
				dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
					next_cycle, cycle);
				return -EIO;
			}
		}

		err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
					  packet_index, i);
		if (err < 0)
			return err;

		desc->cycle = cycle;
		desc->syt = syt;
		desc->data_blocks = data_blocks;
		desc->data_block_counter = dbc;
		desc->ctx_payload = s->buffer.packets[packet_index].buffer;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		next_cycle = increment_ohci_cycle_count(next_cycle, 1);
		++(*desc_count);
		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
		packet_index = (packet_index + 1) % queue_size;
	}

	s->next_cycle = next_cycle;
	s->data_block_counter = dbc;

	return 0;
}
static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
				unsigned int transfer_delay)
{
	unsigned int syt;

	syt_offset += transfer_delay;
	syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
	      (syt_offset % TICKS_PER_CYCLE);
	return syt & CIP_SYT_MASK;
}
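
// Continuing the invented numbers used for compute_syt_offset() above: for
// cycle 5, a syt offset of 2000 ticks and a transfer delay of 8704 ticks, the
// sum is 10704 = 3 * 3072 + 1488 ticks, so the SYT field becomes
// ((5 + 3) << 12) | 1488 = 0x85d0.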
static void generate_pkt_descs(struct amdtp_stream *s, const __be32 *ctx_header, unsigned int packets)
{
	struct pkt_desc *descs = s->pkt_descs;
	const struct seq_desc *seq_descs = s->ctx_data.rx.seq.descs;
	const unsigned int seq_size = s->ctx_data.rx.seq.size;
	unsigned int dbc = s->data_block_counter;
	unsigned int seq_head = s->ctx_data.rx.seq.head;
	bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
	int i;

	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + i;
		unsigned int index = (s->packet_index + i) % s->queue_size;
		const struct seq_desc *seq = seq_descs + seq_head;

		desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);

		if (aware_syt && seq->syt_offset != CIP_SYT_NO_INFO)
			desc->syt = compute_syt(seq->syt_offset, desc->cycle, s->transfer_delay);
		else
			desc->syt = CIP_SYT_NO_INFO;

		desc->data_blocks = seq->data_blocks;

		if (s->flags & CIP_DBC_IS_END_EVENT)
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->data_block_counter = dbc;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->ctx_payload = s->buffer.packets[index].buffer;

		seq_head = (seq_head + 1) % seq_size;

		++ctx_header;
	}

	s->data_block_counter = dbc;
	s->ctx_data.rx.seq.head = seq_head;
}
static inline void cancel_stream(struct amdtp_stream *s)
{
	s->packet_index = -1;
	if (current_work() == &s->period_work)
		amdtp_stream_pcm_abort(s);
	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}
static void process_ctx_payloads(struct amdtp_stream *s,
				 const struct pkt_desc *descs,
				 unsigned int packets)
{
	struct snd_pcm_substream *pcm;
	unsigned int pcm_frames;

	pcm = READ_ONCE(s->pcm);
	pcm_frames = s->process_ctx_payloads(s, descs, packets, pcm);
	if (pcm)
		update_pcm_pointers(s, pcm, pcm_frames);
}
static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			       void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	const unsigned int events_per_period = d->events_per_period;
	unsigned int event_count = s->ctx_data.rx.event_count;
	unsigned int pkt_header_length;
	unsigned int packets;
	int i;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in the buffer and check XRUN.
	packets = header_length / sizeof(*ctx_header);

	pool_ideal_seq_descs(s, packets);

	generate_pkt_descs(s, ctx_header, packets);

	process_ctx_payloads(s, s->pkt_descs, packets);

	if (!(s->flags & CIP_NO_HEADER))
		pkt_header_length = IT_PKT_HEADER_SIZE_CIP;
	else
		pkt_header_length = 0;

	for (i = 0; i < packets; ++i) {
		const struct pkt_desc *desc = s->pkt_descs + i;
		struct {
			struct fw_iso_packet params;
			__be32 header[CIP_HEADER_QUADLETS];
		} template = { {0}, {0} };
		bool sched_irq = false;

		build_it_pkt_header(s, desc->cycle, &template.params, pkt_header_length,
				    desc->data_blocks, desc->data_block_counter,
				    desc->syt, i);

		if (s == s->domain->irq_target) {
			event_count += desc->data_blocks;
			if (event_count >= events_per_period) {
				event_count -= events_per_period;
				sched_irq = true;
			}
		}

		if (queue_out_packet(s, &template.params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}

	s->ctx_data.rx.event_count = event_count;
}
static void skip_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			    void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	unsigned int packets;
	unsigned int cycle;
	int i;

	if (s->packet_index < 0)
		return;

	packets = header_length / sizeof(*ctx_header);

	cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
	s->next_cycle = increment_ohci_cycle_count(cycle, 1);

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {
			.header_length = 0,
			.payload_length = 0,
		};
		bool sched_irq = (s == d->irq_target && i == packets - 1);

		if (queue_out_packet(s, &params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}
}
static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
				void *header, void *private_data);
static void process_rx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
					      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header = header;
	const unsigned int queue_size = s->queue_size;
	unsigned int packets;
	unsigned int offset;

	if (s->packet_index < 0)
		return;

	packets = header_length / sizeof(*ctx_header);

	offset = 0;
	while (offset < packets) {
		unsigned int cycle = compute_ohci_it_cycle(ctx_header[offset], queue_size);

		if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0)
			break;

		++offset;
	}

	if (offset > 0) {
		unsigned int length = sizeof(*ctx_header) * offset;

		skip_rx_packets(context, tstamp, length, ctx_header, private_data);
		if (amdtp_streaming_error(s))
			return;

		ctx_header += offset;
		header_length -= length;
	}

	if (offset < packets) {
		s->ready_processing = true;
		wake_up(&s->ready_wait);

		process_rx_packets(context, tstamp, header_length, ctx_header, private_data);
		if (amdtp_streaming_error(s))
			return;

		if (s == d->irq_target)
			s->context->callback.sc = irq_target_callback;
		else
			s->context->callback.sc = process_rx_packets;
	}
}
static void process_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			       void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	__be32 *ctx_header = header;
	unsigned int packets;
	unsigned int desc_count;
	int i;
	int err;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in the buffer and check XRUN.
	packets = header_length / s->ctx_data.tx.ctx_header_size;

	desc_count = 0;
	err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets, &desc_count);
	if (err < 0) {
		if (err != -EAGAIN) {
			cancel_stream(s);
			return;
		}
	} else {
		struct amdtp_domain *d = s->domain;

		process_ctx_payloads(s, s->pkt_descs, desc_count);

		if (d->replay.enable)
			cache_seq(s, s->pkt_descs, desc_count);
	}

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}
static void drop_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			    void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	unsigned int packets;
	unsigned int cycle;
	int i;

	if (s->packet_index < 0)
		return;

	packets = header_length / s->ctx_data.tx.ctx_header_size;

	ctx_header += (packets - 1) * s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
	cycle = compute_ohci_cycle_count(ctx_header[1]);
	s->next_cycle = increment_ohci_cycle_count(cycle, 1);

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}
static void process_tx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
					      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header;
	unsigned int packets;
	unsigned int offset;

	if (s->packet_index < 0)
		return;

	packets = header_length / s->ctx_data.tx.ctx_header_size;

	offset = 0;
	ctx_header = header;
	while (offset < packets) {
		unsigned int cycle = compute_ohci_cycle_count(ctx_header[1]);

		if (compare_ohci_cycle_count(cycle, d->processing_cycle.tx_start) >= 0)
			break;

		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
		++offset;
	}

	ctx_header = header;

	if (offset > 0) {
		size_t length = s->ctx_data.tx.ctx_header_size * offset;

		drop_tx_packets(context, tstamp, length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;

		ctx_header += length / sizeof(*ctx_header);
		header_length -= length;
	}

	if (offset < packets) {
		s->ready_processing = true;
		wake_up(&s->ready_wait);

		process_tx_packets(context, tstamp, header_length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;

		context->callback.sc = process_tx_packets;
	}
}
static void drop_tx_packets_initially(struct fw_iso_context *context, u32 tstamp,
				      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header;
	unsigned int count;
	unsigned int events;
	int i;

	if (s->packet_index < 0)
		return;

	count = header_length / s->ctx_data.tx.ctx_header_size;

	// Attempt to detect any event in the batch of packets.
	events = 0;
	ctx_header = header;
	for (i = 0; i < count; ++i) {
		unsigned int payload_quads =
			(be32_to_cpu(*ctx_header) >> ISO_DATA_LENGTH_SHIFT) / sizeof(__be32);
		unsigned int data_blocks;

		if (s->flags & CIP_NO_HEADER) {
			data_blocks = payload_quads / s->data_block_quadlets;
		} else {
			__be32 *cip_headers = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;

			if (payload_quads < CIP_HEADER_QUADLETS) {
				data_blocks = 0;
			} else {
				payload_quads -= CIP_HEADER_QUADLETS;

				if (s->flags & CIP_UNAWARE_SYT) {
					data_blocks = payload_quads / s->data_block_quadlets;
				} else {
					u32 cip1 = be32_to_cpu(cip_headers[1]);

					// A NODATA packet can include data blocks, but they
					// are not available as events.
					if ((cip1 & CIP_NO_DATA) == CIP_NO_DATA)
						data_blocks = 0;
					else
						data_blocks = payload_quads / s->data_block_quadlets;
				}
			}
		}

		events += data_blocks;

		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
	}

	drop_tx_packets(context, tstamp, header_length, header, s);

	if (events > 0)
		s->ctx_data.tx.event_starts = true;

	// Decide the cycle count to begin processing the content of packets in IR contexts.
	{
		unsigned int stream_count = 0;
		unsigned int event_starts_count = 0;
		unsigned int cycle = UINT_MAX;

		list_for_each_entry(s, &d->streams, list) {
			if (s->direction == AMDTP_IN_STREAM) {
				++stream_count;
				if (s->ctx_data.tx.event_starts)
					++event_starts_count;
			}
		}

		if (stream_count == event_starts_count) {
			unsigned int next_cycle;

			list_for_each_entry(s, &d->streams, list) {
				if (s->direction != AMDTP_IN_STREAM)
					continue;

				next_cycle = increment_ohci_cycle_count(s->next_cycle,
								d->processing_cycle.tx_init_skip);
				if (cycle == UINT_MAX ||
				    compare_ohci_cycle_count(next_cycle, cycle) > 0)
					cycle = next_cycle;

				s->context->callback.sc = process_tx_packets_intermediately;
			}

			d->processing_cycle.tx_start = cycle;
		}
	}
}
static void process_ctxs_in_domain(struct amdtp_domain *d)
{
	struct amdtp_stream *s;

	list_for_each_entry(s, &d->streams, list) {
		if (s != d->irq_target && amdtp_stream_running(s))
			fw_iso_context_flush_completions(s->context);

		if (amdtp_streaming_error(s))
			goto error;
	}

	return;
error:
	if (amdtp_stream_running(d->irq_target))
		cancel_stream(d->irq_target);

	list_for_each_entry(s, &d->streams, list) {
		if (amdtp_stream_running(s))
			cancel_stream(s);
	}
}
static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
				void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	process_rx_packets(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);
}
static void irq_target_callback_intermediately(struct fw_iso_context *context, u32 tstamp,
					       size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	process_rx_packets_intermediately(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);
}
static void irq_target_callback_skip(struct fw_iso_context *context, u32 tstamp,
				     size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	unsigned int cycle;

	skip_rx_packets(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);

	// Decide the cycle count to begin processing the content of packets in IT contexts. All IT
	// contexts are expected to have started and got a callback when reaching here.
	cycle = s->next_cycle;
	list_for_each_entry(s, &d->streams, list) {
		if (s->direction != AMDTP_OUT_STREAM)
			continue;

		if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
			cycle = s->next_cycle;

		if (s == d->irq_target)
			s->context->callback.sc = irq_target_callback_intermediately;
		else
			s->context->callback.sc = process_rx_packets_intermediately;
	}

	d->processing_cycle.rx_start = cycle;
}
// This is executed one time. For an in-stream, the first packet has arrived. For an out-stream,
// it is prepared to transmit the first packet.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	if (s->direction == AMDTP_IN_STREAM) {
		context->callback.sc = drop_tx_packets_initially;
	} else {
		if (s == d->irq_target)
			context->callback.sc = irq_target_callback_skip;
		else
			context->callback.sc = skip_rx_packets;
	}

	context->callback.sc(context, tstamp, header_length, header, s);
}
/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 * @queue_size: The number of packets in the queue.
 * @idle_irq_interval: the interval to queue packets during the initial state.
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
 * device can be started.
 */
static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
			      unsigned int queue_size, unsigned int idle_irq_interval)
{
	bool is_irq_target = (s == s->domain->irq_target);
	unsigned int ctx_header_size;
	unsigned int max_ctx_payload_size;
	enum dma_data_direction dir;
	int type, tag, err;

	mutex_lock(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}

	if (s->direction == AMDTP_IN_STREAM) {
		// NOTE: IT context should be used for constant IRQ.
		if (is_irq_target) {
			err = -EINVAL;
			goto err_unlock;
		}

		s->data_block_counter = UINT_MAX;
	} else {
		s->data_block_counter = 0;
	}

	// initialize packet buffer.
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		if (!(s->flags & CIP_NO_HEADER))
			ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
		else
			ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		ctx_header_size = 0;	// No effect for IT context.
	}
	max_ctx_payload_size = amdtp_stream_get_max_ctx_payload_size(s);

	err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size, max_ctx_payload_size, dir);
	if (err < 0)
		goto err_unlock;
	s->queue_size = queue_size;

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					   type, channel, speed, ctx_header_size,
					   amdtp_stream_first_callback, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

	if (s->direction == AMDTP_IN_STREAM) {
		s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
		s->ctx_data.tx.ctx_header_size = ctx_header_size;
		s->ctx_data.tx.event_starts = false;

		if (s->domain->replay.enable) {
			// struct fw_iso_context.drop_overflow_headers is false, therefore it's
			// possible to cache more than expected.
			s->ctx_data.tx.cache.size = max_t(unsigned int, s->syt_interval * 2,
							  queue_size * 3 / 2);
			s->ctx_data.tx.cache.tail = 0;
			s->ctx_data.tx.cache.descs = kcalloc(s->ctx_data.tx.cache.size,
						sizeof(*s->ctx_data.tx.cache.descs), GFP_KERNEL);
			if (!s->ctx_data.tx.cache.descs) {
				err = -ENOMEM;
				goto err_context;
			}
		}
	} else {
		static const struct {
			unsigned int data_block;
			unsigned int syt_offset;
		} *entry, initial_state[] = {
			[CIP_SFC_32000]  = {  4, 3072 },
			[CIP_SFC_48000]  = {  6, 1024 },
			[CIP_SFC_96000]  = { 12, 1024 },
			[CIP_SFC_192000] = { 24, 1024 },
			[CIP_SFC_44100]  = {  0,   67 },
			[CIP_SFC_88200]  = {  0,   67 },
			[CIP_SFC_176400] = {  0,   67 },
		};

		s->ctx_data.rx.seq.descs = kcalloc(queue_size, sizeof(*s->ctx_data.rx.seq.descs), GFP_KERNEL);
		if (!s->ctx_data.rx.seq.descs) {
			err = -ENOMEM;
			goto err_context;
		}
		s->ctx_data.rx.seq.size = queue_size;
		s->ctx_data.rx.seq.tail = 0;
		s->ctx_data.rx.seq.head = 0;

		entry = &initial_state[s->sfc];
		s->ctx_data.rx.data_block_state = entry->data_block;
		s->ctx_data.rx.syt_offset_state = entry->syt_offset;
		s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;

		s->ctx_data.rx.event_count = 0;
	}

	if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;

	s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs),
			       GFP_KERNEL);
	if (!s->pkt_descs) {
		err = -ENOMEM;
		goto err_context;
	}

	s->packet_index = 0;
	do {
		struct fw_iso_packet params;

		if (s->direction == AMDTP_IN_STREAM) {
			err = queue_in_packet(s, &params);
		} else {
			bool sched_irq = false;

			params.header_length = 0;
			params.payload_length = 0;

			if (is_irq_target) {
				sched_irq = !((s->packet_index + 1) %
					      idle_irq_interval);
			}

			err = queue_out_packet(s, &params, sched_irq);
		}
		if (err < 0)
			goto err_pkt_descs;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This just affects the in-stream. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->ready_processing = false;
	err = fw_iso_context_start(s->context, -1, 0, tag);
	if (err < 0)
		goto err_pkt_descs;

	mutex_unlock(&s->mutex);

	return 0;
err_pkt_descs:
	kfree(s->pkt_descs);
err_context:
	if (s->direction == AMDTP_OUT_STREAM) {
		kfree(s->ctx_data.rx.seq.descs);
	} else {
		if (s->domain->replay.enable)
			kfree(s->ctx_data.tx.cache.descs);
	}
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}
/**
 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
					      struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	if (irq_target && amdtp_stream_running(irq_target)) {
		// This function is called in software IRQ context of
		// period_work or process context.
		//
		// When the software IRQ context was scheduled by software IRQ
		// context of IT contexts, queued packets were already handled.
		// Therefore, there is no need to flush the queue in the buffer
		// any further.
		//
		// When the process context reaches here, some packets will
		// already be queued in the buffer. These packets should be
		// handled immediately to keep better granularity of the PCM
		// pointer.
		//
		// Later, the process context sometimes schedules the software
		// IRQ context of the period_work. Then there is no need to
		// flush the queue, for the same reason as described above.
		if (current_work() != &s->period_work) {
			// Queued packets should be processed without any kernel
			// preemption to keep latency against the bus cycle.
			preempt_disable();
			fw_iso_context_flush_completions(irq_target->context);
			preempt_enable();
		}
	}

	return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);
/**
 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transfers the PCM frames
 *
 * Returns zero always.
 */
int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	// Process isochronous packets for recent isochronous cycles to handle
	// queued PCM frames.
	if (irq_target && amdtp_stream_running(irq_target)) {
		// Queued packets should be processed without any kernel
		// preemption to keep latency against the bus cycle.
		preempt_disable();
		fw_iso_context_flush_completions(irq_target->context);
		preempt_enable();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);
/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
	/* Precomputing. */
	WRITE_ONCE(s->source_node_id_field,
		   (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);
/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
static void amdtp_stream_stop(struct amdtp_stream *s)
{
	mutex_lock(&s->mutex);

	if (!amdtp_stream_running(s)) {
		mutex_unlock(&s->mutex);
		return;
	}

	cancel_work_sync(&s->period_work);
	fw_iso_context_stop(s->context);
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
	iso_packets_buffer_destroy(&s->buffer, s->unit);
	kfree(s->pkt_descs);

	if (s->direction == AMDTP_OUT_STREAM) {
		kfree(s->ctx_data.rx.seq.descs);
	} else {
		if (s->domain->replay.enable)
			kfree(s->ctx_data.tx.cache.descs);
	}

	mutex_unlock(&s->mutex);
}
/**
 * amdtp_stream_pcm_abort - abort the running PCM device
 * @s: the AMDTP stream about to be stopped
 *
 * If the isochronous stream needs to be stopped asynchronously, call this
 * function first to stop the PCM device.
 */
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
	struct snd_pcm_substream *pcm;

	pcm = READ_ONCE(s->pcm);
	if (pcm)
		snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);
/**
 * amdtp_domain_init - initialize an AMDTP domain structure
 * @d: the AMDTP domain to initialize.
 */
int amdtp_domain_init(struct amdtp_domain *d)
{
	INIT_LIST_HEAD(&d->streams);

	d->events_per_period = 0;
	d->events_per_buffer = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);
/**
 * amdtp_domain_destroy - destroy an AMDTP domain structure
 * @d: the AMDTP domain to destroy.
 */
void amdtp_domain_destroy(struct amdtp_domain *d)
{
	// At present nothing to do.
	return;
}
EXPORT_SYMBOL_GPL(amdtp_domain_destroy);
/**
 * amdtp_domain_add_stream - register an isoc context into the domain.
 * @d: the AMDTP domain.
 * @s: the AMDTP stream.
 * @channel: the isochronous channel on the bus.
 * @speed: firewire speed code.
 */
int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
			    int channel, int speed)
{
	struct amdtp_stream *tmp;

	list_for_each_entry(tmp, &d->streams, list) {
		if (s == tmp)
			return -EBUSY;
	}

	list_add(&s->list, &d->streams);

	s->channel = channel;
	s->speed = speed;
	s->domain = d;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);
/**
 * amdtp_domain_start - start sending packets for isoc contexts in the domain.
 * @d: the AMDTP domain.
 * @tx_init_skip_cycles: the number of cycles to skip processing packets at the initial stage of IR
 *			 contexts.
 * @replay_seq: whether to replay the sequence of packets in the IR context for the sequence of
 *		packets in the IT context.
 */
int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles, bool replay_seq)
{
	unsigned int events_per_buffer = d->events_per_buffer;
	unsigned int events_per_period = d->events_per_period;
	unsigned int queue_size;
	struct amdtp_stream *s;
	int err;

	d->replay.enable = replay_seq;

	// Select an IT context as IRQ target.
	list_for_each_entry(s, &d->streams, list) {
		if (s->direction == AMDTP_OUT_STREAM)
			break;
	}
	if (!s)
		return -ENXIO;
	d->irq_target = s;

	d->processing_cycle.tx_init_skip = tx_init_skip_cycles;

	// This is the case in which AMDTP streams in the domain run just for a
	// MIDI substream. Use the number of events equivalent to 10 msec as the
	// interval of hardware IRQ.
	if (events_per_period == 0)
		events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
	if (events_per_buffer == 0)
		events_per_buffer = events_per_period * 3;

	queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
				  amdtp_rate_table[d->irq_target->sfc]);
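
	// A hypothetical sizing example: at 48000 Hz with the 10 msec default,
	// events_per_period = 480 and events_per_buffer = 1440, so queue_size =
	// DIV_ROUND_UP(8000 * 1440, 48000) = 240 packets, i.e. 30 msec of cycles.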

	list_for_each_entry(s, &d->streams, list) {
		unsigned int idle_irq_interval = 0;

		if (s->direction == AMDTP_OUT_STREAM && s == d->irq_target) {
			idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
							 amdtp_rate_table[d->irq_target->sfc]);
		}

		// Start immediately, although the DMA context actually starts several hundred
		// cycles later.
		err = amdtp_stream_start(s, s->channel, s->speed, queue_size, idle_irq_interval);
		if (err < 0)
			goto error;
	}

	return 0;
error:
	list_for_each_entry(s, &d->streams, list)
		amdtp_stream_stop(s);
	return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);
/**
 * amdtp_domain_stop - stop sending packets for isoc contexts in the same domain.
 * @d: the AMDTP domain to which the isoc contexts belong.
 */
void amdtp_domain_stop(struct amdtp_domain *d)
{
	struct amdtp_stream *s, *next;

	if (d->irq_target)
		amdtp_stream_stop(d->irq_target);

	list_for_each_entry_safe(s, next, &d->streams, list) {
		list_del(&s->list);

		if (s != d->irq_target)
			amdtp_stream_stop(s);
	}

	d->events_per_period = 0;
	d->irq_target = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);