Lines Matching full:cycle
231 // of syt interval. This comes from the interval of isoc cycle. As 1394 in amdtp_stream_add_pcm_hw_constraints()
484 static unsigned int compute_syt_offset(unsigned int syt, unsigned int cycle, in compute_syt_offset() argument
487 unsigned int cycle_lo = (cycle % CYCLES_PER_SECOND) & 0x0f; in compute_syt_offset()
534 dst->syt_offset = compute_syt_offset(src->syt, src->cycle, transfer_delay); in cache_seq()
705 static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle, in build_it_pkt_header() argument
725 trace_amdtp_packet(s, cycle, cip_header, payload_length + header_length, data_blocks, in build_it_pkt_header()
828 static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle, in parse_ir_ctx_header() argument
863 // Handle the cycle so that empty packet arrives. in parse_ir_ctx_header()
877 trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks, in parse_ir_ctx_header()
897 static inline u32 increment_ohci_cycle_count(u32 cycle, unsigned int addend) in increment_ohci_cycle_count() argument
899 cycle += addend; in increment_ohci_cycle_count()
900 if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND) in increment_ohci_cycle_count()
901 cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND; in increment_ohci_cycle_count()
902 return cycle; in increment_ohci_cycle_count()
923 // Align to actual cycle count for the packet which is going to be scheduled.
924 // This module queued the same number of isochronous cycle as the size of queue
925 // to skip isochronous cycle, therefore it's OK to just increment the cycle by in amdtp-stream.c
926 // the size of queue for scheduled cycle.
930 u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp); in compute_ohci_it_cycle() local
931 return increment_ohci_cycle_count(cycle, queue_size); in compute_ohci_it_cycle()
951 unsigned int cycle; in generate_tx_packet_descs() local
956 cycle = compute_ohci_cycle_count(ctx_header[1]); in generate_tx_packet_descs()
957 lost = (next_cycle != cycle); in generate_tx_packet_descs()
960 // Fireface skips transmission just for an isoc cycle corresponding in generate_tx_packet_descs()
965 lost = (next_cycle != cycle); in generate_tx_packet_descs()
967 // Prepare a description for the skipped cycle for in generate_tx_packet_descs()
969 desc->cycle = prev_cycle; in generate_tx_packet_descs()
983 lost = (compare_ohci_cycle_count(safe_cycle, cycle) < 0); in generate_tx_packet_descs()
986 dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n", in generate_tx_packet_descs()
987 next_cycle, cycle); in generate_tx_packet_descs()
992 err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt, in generate_tx_packet_descs()
997 desc->cycle = cycle; in generate_tx_packet_descs()
1019 static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle, in compute_syt() argument
1025 syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) | in compute_syt()
1046 desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size); in generate_rx_packet_descs()
1049 desc->syt = compute_syt(seq->syt_offset, desc->cycle, s->transfer_delay); in generate_rx_packet_descs()
1106 latest_cycle = desc->cycle; in compute_pcm_extra_delay()
1112 // Compute cycle count with lower 3 bits of second field and cycle field like timestamp in compute_pcm_extra_delay()
1117 // NOTE: The AMDTP packet descriptor should be for the past isochronous cycle since in compute_pcm_extra_delay()
1125 // the most recent isochronous cycle has been already processed. in compute_pcm_extra_delay()
1131 // NOTE: The AMDTP packet descriptor should be for the future isochronous cycle in compute_pcm_extra_delay()
1218 build_it_pkt_header(s, desc->cycle, template, pkt_header_length, in process_rx_packets()
1249 unsigned int cycle; in skip_rx_packets() local
1257 cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size); in skip_rx_packets()
1258 s->next_cycle = increment_ohci_cycle_count(cycle, 1); in skip_rx_packets()
1294 unsigned int cycle = compute_ohci_it_cycle(ctx_header[offset], queue_size); in process_rx_packets_intermediately() local
1296 if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0) in process_rx_packets_intermediately()
1384 unsigned int cycle; in drop_tx_packets() local
1393 cycle = compute_ohci_cycle_count(ctx_header[1]); in drop_tx_packets()
1394 s->next_cycle = increment_ohci_cycle_count(cycle, 1); in drop_tx_packets()
1423 unsigned int cycle = compute_ohci_cycle_count(ctx_header[1]); in process_tx_packets_intermediately() local
1425 if (compare_ohci_cycle_count(cycle, d->processing_cycle.tx_start) >= 0) in process_tx_packets_intermediately()
1515 // Decide the cycle count to begin processing content of packet in IR contexts. in drop_tx_packets_initially()
1519 unsigned int cycle = UINT_MAX; in drop_tx_packets_initially() local
1538 if (cycle == UINT_MAX || in drop_tx_packets_initially()
1539 compare_ohci_cycle_count(next_cycle, cycle) > 0) in drop_tx_packets_initially()
1540 cycle = next_cycle; in drop_tx_packets_initially()
1545 d->processing_cycle.tx_start = cycle; in drop_tx_packets_initially()
1627 // Decide the cycle count to begin processing content of packet in IT contexts. All of IT in irq_target_callback_skip()
1630 unsigned int cycle = s->next_cycle; in irq_target_callback_skip() local
1635 if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0) in irq_target_callback_skip()
1636 cycle = s->next_cycle; in irq_target_callback_skip()
1644 d->processing_cycle.rx_start = cycle; in irq_target_callback_skip()
1907 // Process isochronous packets for recent isochronous cycle to handle in amdtp_domain_stream_pcm_ack()