Lines Matching full:cycle
220 // of the syt interval. This comes from the interval of the isoc cycle. As 1394 in amdtp_stream_add_pcm_hw_constraints()
507 static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle, in build_it_pkt_header() argument
528 trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks, in build_it_pkt_header()
628 static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle, in parse_ir_ctx_header() argument
664 trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks, in parse_ir_ctx_header()
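For context on these matches: build_it_pkt_header() fills, and parse_ir_ctx_header() checks, the two-quadlet CIP header that precedes the audio data blocks in each isochronous packet. Below is a minimal packing sketch of that header, assuming the IEC 61883-1 field layout; the helper name and flat argument list are illustrative, not the driver's own API.

    #include <stdint.h>

    /* Two-quadlet CIP header, assuming the IEC 61883-1 layout: quadlet 0
     * carries SID/DBS/DBC (FN, QPC and SPH left zero here), quadlet 1
     * carries EOH=1, FMT, FDF and the 16-bit SYT timestamp. */
    static void pack_cip_header(uint32_t cip[2], unsigned int sid,
                                unsigned int dbs, unsigned int dbc,
                                unsigned int fmt, unsigned int fdf,
                                unsigned int syt)
    {
            cip[0] = (sid & 0x3f) << 24 | (dbs & 0xff) << 16 | (dbc & 0xff);
            cip[1] = 1u << 31 | (fmt & 0x3f) << 24 | (fdf & 0xff) << 16 |
                     (syt & 0xffff);
    }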
679 static inline u32 increment_cycle_count(u32 cycle, unsigned int addend) in increment_cycle_count() argument
681 cycle += addend; in increment_cycle_count()
682 if (cycle >= OHCI_MAX_SECOND * CYCLES_PER_SECOND) in increment_cycle_count()
683 cycle -= OHCI_MAX_SECOND * CYCLES_PER_SECOND; in increment_cycle_count()
684 return cycle; in increment_cycle_count()
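increment_cycle_count() is plain modular arithmetic on the absolute cycle count. A self-contained sketch follows, with the constants inlined as assumptions: the OHCI isoc timestamp has a 3-bit seconds field, so OHCI_MAX_SECOND is taken as 8, giving a modulus of 8 * 8000 = 64000 cycles.

    #include <assert.h>
    #include <stdint.h>

    /* Assumed constants: 3-bit seconds field in the OHCI isoc timestamp
     * and 8000 isochronous cycles per second. */
    #define OHCI_MAX_SECOND    8
    #define CYCLES_PER_SECOND  8000

    static uint32_t increment_cycle_count(uint32_t cycle, unsigned int addend)
    {
            /* One conditional subtraction is enough because the addend is
             * far smaller than the 64000-cycle modulus. */
            cycle += addend;
            if (cycle >= OHCI_MAX_SECOND * CYCLES_PER_SECOND)
                    cycle -= OHCI_MAX_SECOND * CYCLES_PER_SECOND;
            return cycle;
    }

    int main(void)
    {
            /* 63999 is the last cycle before wrap; adding 2 lands on 1. */
            assert(increment_cycle_count(63999, 2) == 1);
            return 0;
    }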
687 // Align to the actual cycle count for the packet which is going to be scheduled.
688 // This module queues the same number of isochronous cycles as the size of the queue
689 // to skip isochronous cycles, therefore it's OK to just increment the cycle by
690 // the size of the queue for the scheduled cycle.
694 u32 cycle = compute_cycle_count(ctx_header_tstamp); in compute_it_cycle() local
695 return increment_cycle_count(cycle, queue_size); in compute_it_cycle()
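Per the comment at source lines 687-690 above, queue_size packets are always in flight, so the packet being built now goes out queue_size cycles after the cycle reported in the completed packet's context header. A self-contained sketch under the same assumptions (16-bit timestamp split as 3 bits of seconds and 13 bits of cycle; the in-tree functions take a __be32 context header quadlet instead):

    #include <stdint.h>

    #define CYCLES_PER_SECOND  8000

    /* Recover the absolute cycle count (0..63999) from the 16-bit OHCI
     * timestamp: assumed bits 15..13 = cycleSeconds, 12..0 = cycleCount. */
    static uint32_t compute_cycle_count(uint16_t tstamp)
    {
            return ((tstamp >> 13) & 0x7) * CYCLES_PER_SECOND +
                   (tstamp & 0x1fff);
    }

    /* Scheduled cycle for the packet being built now: the context header
     * reports the cycle of the packet just completed, and queue_size
     * packets are always in flight, so add queue_size modulo 64000. */
    static uint32_t compute_it_cycle(uint16_t tstamp, unsigned int queue_size)
    {
            uint32_t cycle = compute_cycle_count(tstamp) + queue_size;

            if (cycle >= 8 * CYCLES_PER_SECOND)
                    cycle -= 8 * CYCLES_PER_SECOND;
            return cycle;
    }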
710 unsigned int cycle; in generate_device_pkt_descs() local
715 cycle = compute_cycle_count(ctx_header[1]); in generate_device_pkt_descs()
717 err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length, in generate_device_pkt_descs()
722 desc->cycle = cycle; in generate_device_pkt_descs()
740 static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle, in compute_syt() argument
746 syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) | in compute_syt()
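compute_syt() folds the presentation time into the 16-bit SYT field: the low four bits of the target cycle land in bits 15..12 and the tick offset within that cycle in bits 11..0. A worked sketch follows, assuming TICKS_PER_CYCLE is 3072 (the 24.576 MHz clock divided by 8000 cycles per second) and a final 0xffff mask; the in-tree function also folds a transfer delay into syt_offset first, which is omitted here.

    #include <assert.h>

    #define TICKS_PER_CYCLE  3072   /* assumed: 24.576 MHz / 8000 cycles/s */

    static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle)
    {
            unsigned int syt;

            /* Carry whole cycles out of the tick offset, then keep only
             * the low 4 bits of the cycle plus the 12-bit tick remainder. */
            syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
                  (syt_offset % TICKS_PER_CYCLE);
            return syt & 0xffff;
    }

    int main(void)
    {
            /* 3500 ticks from cycle 100 lands 428 ticks into cycle 101
             * (101 = 0x65, low nibble 5; 428 = 0x1ac). */
            assert(compute_syt(3500, 100) == 0x51ac);
            return 0;
    }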
766 desc->cycle = compute_it_cycle(*ctx_header, s->queue_size); in generate_pkt_descs()
770 syt = compute_syt(syt, desc->cycle, in generate_pkt_descs()
853 build_it_pkt_header(s, desc->cycle, &template.params, in out_stream_callback()
997 u32 cycle; in amdtp_stream_first_callback() local
1007 cycle = compute_cycle_count(ctx_header[1]); in amdtp_stream_first_callback()
1011 cycle = compute_it_cycle(*ctx_header, s->queue_size); in amdtp_stream_first_callback()
1019 s->start_cycle = cycle; in amdtp_stream_first_callback()
1029 * @start_cycle: the isochronous cycle to start the context. Start immediately
1203 // preemption to keep latency against bus cycle. in amdtp_domain_stream_pcm_pointer()
1225 // Process isochronous packets for recent isochronous cycles to handle in amdtp_domain_stream_pcm_ack()
1229 // preemption to keep latency against bus cycle. in amdtp_domain_stream_pcm_ack()
1377 * @ir_delay_cycle: the cycle delay to start all IR contexts.
1398 int cycle; in amdtp_domain_start() local
1435 err = get_current_cycle_time(fw_card, &cycle); in amdtp_domain_start()
1439 // No need to care about overflow in the cycle field; it is wide enough. in amdtp_domain_start()
1441 cycle += ir_delay_cycle; in amdtp_domain_start()
1444 if ((cycle & 0x00001fff) >= CYCLES_PER_SECOND) { in amdtp_domain_start()
1448 sec = (cycle & 0xffffe000) >> 13; in amdtp_domain_start()
1449 cycle = (++sec << 13) | in amdtp_domain_start()
1450 ((cycle & 0x00001fff) % CYCLES_PER_SECOND); in amdtp_domain_start()
1455 cycle &= 0x00007fff; in amdtp_domain_start()
1457 cycle = -1; in amdtp_domain_start()
1464 cycle_match = cycle; in amdtp_domain_start()
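The block above normalizes the seconds/cycle split after adding ir_delay_cycle: bits 12..0 hold the cycle-in-second count (0..7999), so a value of 8000 or more carries one into the seconds field at bit 13, keeping the remainder (hence modulo, not division) as the new cycle count, and the final mask keeps the 15 bits the cycle-match field accepts. A self-contained sketch of that normalization, assuming the delay adds less than one extra second:

    #include <assert.h>
    #include <stdint.h>

    #define CYCLES_PER_SECOND  8000

    /* 'cycle' packs seconds above bit 13 and cycle-in-second in bits
     * 12..0. Assuming the delay is under one second, the low field can
     * exceed 7999 by less than 8000, so a single carry is enough. */
    static uint32_t add_delay(uint32_t cycle, unsigned int delay)
    {
            cycle += delay;
            if ((cycle & 0x00001fff) >= CYCLES_PER_SECOND) {
                    uint32_t sec = (cycle & 0xffffe000) >> 13;
                    cycle = (++sec << 13) |
                            ((cycle & 0x00001fff) % CYCLES_PER_SECOND);
            }
            /* Assumed 15-bit cycle-match field: two low bits of seconds
             * plus the 13-bit cycle, hence the driver's final mask. */
            return cycle & 0x00007fff;
    }

    int main(void)
    {
            /* sec=2, cycle=7990 plus 20 cycles -> sec=3, cycle=10. */
            assert(add_delay((2u << 13) | 7990, 20) == ((3u << 13) | 10));
            return 0;
    }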