Lines matching references to ep_ring

369 struct cdnsp_ring *ep_ring;
374 ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
375 if (!ep_ring)
378 if (!ep_ring->stream_active || ep_ring->stream_rejected)
381 list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
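
The cluster above (369-381) looks up the per-stream transfer ring and, when the ring is inactive or the controller has rejected the stream, flushes every TD still queued on it; the same flush reappears at 1217. Each TD is unlinked as it is given back, so the walk needs the safe-iteration form. A minimal userspace sketch of that pattern, with simplified stand-in types (only the names mirror the listing):

#include <stdio.h>
#include <stddef.h>

/* Stand-in intrusive list, shaped like the kernel's struct list_head. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct cdnsp_td {
        struct list_head td_list;
        int id;
};

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

/*
 * Safe iteration: capture the next node before completing the current
 * TD, because completing it unlinks it from the list.
 */
static void flush_stream_ring(struct list_head *td_list)
{
        struct list_head *pos = td_list->next, *n;

        while (pos != td_list) {
                struct cdnsp_td *td =
                        container_of(pos, struct cdnsp_td, td_list);

                n = pos->next;          /* remember the successor first */
                list_del(pos);          /* unlink... */
                printf("flushing TD %d\n", td->id);     /* ...then complete */
                pos = n;
        }
}

list_for_each_entry_safe() in the real code expands to essentially this next-pointer caching.
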
439 struct cdnsp_ring *ep_ring;
444 ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
445 if (!ep_ring)
453 new_seg = ep_ring->deq_seg;
454 new_deq = ep_ring->dequeue;
480 cdnsp_next_trb(pdev, ep_ring, &new_seg, &new_deq);
505 struct cdnsp_ring *ep_ring,
522 cdnsp_next_trb(pdev, ep_ring, &seg, &trb);
639 struct cdnsp_ring *ep_ring;
658 ep_ring = cdnsp_get_transfer_ring(pdev, pep, deq_state->stream_id);
660 if (cdnsp_trb_is_link(ep_ring->dequeue)) {
661 ep_ring->deq_seg = ep_ring->deq_seg->next;
662 ep_ring->dequeue = ep_ring->deq_seg->trbs;
665 while (ep_ring->dequeue != deq_state->new_deq_ptr) {
666 ep_ring->num_trbs_free++;
667 ep_ring->dequeue++;
669 if (cdnsp_trb_is_link(ep_ring->dequeue)) {
670 if (ep_ring->dequeue == deq_state->new_deq_ptr)
673 ep_ring->deq_seg = ep_ring->deq_seg->next;
674 ep_ring->dequeue = ep_ring->deq_seg->trbs;
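
Lines 658-674 resynchronize the driver's software dequeue pointer after a new hardware dequeue position has been programmed: link TRBs are skipped, since a dequeue pointer must never rest on one, and every TRB passed on the way to the new position is counted as free again. A self-contained model of that walk, under the simplifying assumption that the last TRB of each segment is its link TRB (all types are stand-ins):

#include <stdbool.h>

#define TRBS_PER_SEGMENT 256

struct cdnsp_trb { bool is_link; };

struct cdnsp_segment {
        struct cdnsp_trb trbs[TRBS_PER_SEGMENT];
        struct cdnsp_segment *next;     /* segments form a circular chain */
};

struct cdnsp_ring {
        struct cdnsp_segment *deq_seg;
        struct cdnsp_trb *dequeue;
        unsigned int num_trbs_free;
};

static void move_dequeue_to(struct cdnsp_ring *ring,
                            const struct cdnsp_trb *new_deq_ptr)
{
        /* Never leave the dequeue pointer parked on a link TRB. */
        if (ring->dequeue->is_link) {
                ring->deq_seg = ring->deq_seg->next;
                ring->dequeue = ring->deq_seg->trbs;
        }

        while (ring->dequeue != new_deq_ptr) {
                ring->num_trbs_free++;  /* every TRB passed is reusable */
                ring->dequeue++;

                if (ring->dequeue->is_link) {
                        /* The target may be the link TRB itself. */
                        if (ring->dequeue == new_deq_ptr)
                                break;
                        ring->deq_seg = ring->deq_seg->next;
                        ring->dequeue = ring->deq_seg->trbs;
                }
        }
}

The loop body mirrors 665-674 one for one; the sketch simply trusts, as the driver can, that new_deq_ptr is reachable on the ring.
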
697 struct cdnsp_ring *ep_ring;
709 ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
727 cdnsp_td_to_noop(pdev, ep_ring, cur_td, false);
734 ep_ring->num_tds--;
747 cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, cur_td);
896 struct cdnsp_ring *ep_ring,
902 cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, td);
914 ep_ring->num_tds--;
926 struct cdnsp_ring *ep_ring;
929 ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
944 while (ep_ring->dequeue != td->last_trb)
945 cdnsp_inc_deq(pdev, ep_ring);
947 cdnsp_inc_deq(pdev, ep_ring);
949 cdnsp_td_cleanup(pdev, td, ep_ring, status);
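
The 926-949 cluster is the generic end-of-TD bookkeeping: walk the dequeue pointer up to the TD's final TRB, step once more so it points at the first TRB of the next TD, then release the TD. A sketch of that double advance; inc_deq() is a stripped-down, software-only stand-in for cdnsp_inc_deq():

#define TRBS_PER_SEGMENT 256

struct cdnsp_trb { unsigned int is_link; };

struct cdnsp_segment {
        struct cdnsp_trb trbs[TRBS_PER_SEGMENT];
        struct cdnsp_segment *next;
};

struct cdnsp_ring {
        struct cdnsp_segment *deq_seg;
        struct cdnsp_trb *dequeue;
};

static void inc_deq(struct cdnsp_ring *ring)
{
        ring->dequeue++;
        /* Hop over the link TRB that terminates each segment. */
        if (ring->dequeue->is_link) {
                ring->deq_seg = ring->deq_seg->next;
                ring->dequeue = ring->deq_seg->trbs;
        }
}

static void finish_td(struct cdnsp_ring *ring, const struct cdnsp_trb *last_trb)
{
        while (ring->dequeue != last_trb)
                inc_deq(ring);          /* consume the TD's TRBs... */

        inc_deq(ring);                  /* ...and step past its last one */
}

After this, cdnsp_td_cleanup() (896-914 above) unmaps any bounce buffer, drops num_tds, and completes the request.
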
1004 struct cdnsp_ring *ep_ring;
1009 ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1028 td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
1032 cdnsp_giveback_first_trb(pdev, pep, 0, ep_ring->cycle_state,
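
At 1028-1032 the first pending TD is taken from the head of td_list and its first TRB is handed to the controller at the ring's current cycle state. This sketch assumes cdnsp_giveback_first_trb() follows the usual xHCI-style handover discipline: write the whole TD first, issue a write barrier, and only then flip the first TRB's cycle bit, so the controller can never observe a half-written TD:

#include <stdint.h>

#define TRB_CYCLE       (1u << 0)       /* xHCI-style cycle bit */

struct cdnsp_trb { uint32_t control; };

/* Stand-in for the kernel's wmb(); a compiler barrier is enough for a
 * single-threaded sketch. */
#define wmb()   __asm__ __volatile__("" ::: "memory")

static void giveback_first_trb(struct cdnsp_trb *start_trb, int start_cycle)
{
        /*
         * Every other TRB of the TD was already written with its final
         * cycle bit; make those stores visible before ownership flips.
         */
        wmb();

        if (start_cycle)
                start_trb->control |= TRB_CYCLE;
        else
                start_trb->control &= ~TRB_CYCLE;

        /* The real driver rings the endpoint doorbell after this. */
}
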
1055 struct cdnsp_ring *ep_ring;
1059 ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1098 td_length = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb);
1115 struct cdnsp_ring *ep_ring;
1117 ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1122 while (ep_ring->dequeue != td->last_trb)
1123 cdnsp_inc_deq(pdev, ep_ring);
1125 cdnsp_inc_deq(pdev, ep_ring);
1127 cdnsp_td_cleanup(pdev, td, ep_ring, &status);
1141 struct cdnsp_ring *ep_ring;
1144 ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1168 ep_trb_len = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb) +
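
Lines 1098 and 1168 use cdnsp_sum_trb_lengths() to recover how many bytes of a multi-TRB TD had completed by the TRB a transfer event points at. A plausible self-contained version of that walk; the TRB layout is invented, only the shape of the loop matters:

#include <stdint.h>

#define TRBS_PER_SEGMENT 256

struct cdnsp_trb {
        uint32_t len;                   /* bytes covered by this TRB */
        unsigned int is_link;
};

struct cdnsp_segment {
        struct cdnsp_trb trbs[TRBS_PER_SEGMENT];
        struct cdnsp_segment *next;
};

struct cdnsp_ring {
        struct cdnsp_segment *deq_seg;
        struct cdnsp_trb *dequeue;
};

/* Sum TRB lengths from the ring's dequeue up to, but not including,
 * stop_trb; assumes stop_trb is reachable from the dequeue position. */
static uint32_t sum_trb_lengths(const struct cdnsp_ring *ring,
                                const struct cdnsp_trb *stop_trb)
{
        const struct cdnsp_segment *seg = ring->deq_seg;
        const struct cdnsp_trb *trb = ring->dequeue;
        uint32_t sum = 0;

        while (trb != stop_trb) {
                if (trb->is_link) {     /* link TRBs carry no payload */
                        seg = seg->next;
                        trb = seg->trbs;
                        continue;
                }
                sum += trb->len;
                trb++;
        }
        return sum;
}

The extra term added at 1168 would then account for the bytes completed within the event TRB itself.
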
1182 struct cdnsp_ring *ep_ring;
1203 ep_ring = pep->stream_info.stream_rings[cur_stream];
1204 ep_ring->stream_active = 1;
1205 ep_ring->stream_rejected = 0;
1213 ep_ring = pep->stream_info.stream_rings[dev_sid];
1214 ep_ring->stream_active = 0;
1215 ep_ring->stream_rejected = 1;
1217 list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
1236 struct cdnsp_ring *ep_ring;
1252 ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1268 if (!ep_ring) {
1299 * Set the skip flag of the ep_ring; complete the missed TDs as
1300 * short transfers when the ep_ring is processed next time.
1311 if (list_empty(&ep_ring->td_list)) {
1321 ep_ring->last_td_was_short))
1322 trace_cdnsp_trb_without_td(ep_ring,
1333 td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
1337 ep_seg = cdnsp_trb_in_td(pdev, ep_ring->deq_seg,
1338 ep_ring->dequeue, td->last_trb,
1347 trace_cdnsp_handle_transfer(ep_ring,
1357 * of FSE is not in the current TD pointed by ep_ring->dequeue
1385 ep_ring->last_td_was_short = true;
1387 ep_ring->last_td_was_short = false;
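
The heart of the 1236-1387 event handler is the 1337-1338 call: cdnsp_trb_in_td() decides whether the DMA address carried by a transfer event falls inside the TD at the head of the ring, a TD that may span several segments. A simplified containment test in the same spirit, with invented DMA bookkeeping:

#include <stdint.h>
#include <stddef.h>

#define TRBS_PER_SEGMENT 256

typedef uint64_t dma_addr_t;

struct cdnsp_trb { uint32_t field[4]; };

struct cdnsp_segment {
        struct cdnsp_trb trbs[TRBS_PER_SEGMENT];
        dma_addr_t dma;                 /* DMA address of trbs[0] */
        struct cdnsp_segment *next;     /* circular chain of segments */
};

static int trb_in_seg(const struct cdnsp_segment *seg,
                      const struct cdnsp_trb *trb)
{
        return trb >= seg->trbs && trb < &seg->trbs[TRBS_PER_SEGMENT];
}

static dma_addr_t trb_dma(const struct cdnsp_segment *seg,
                          const struct cdnsp_trb *trb)
{
        return seg->dma + (dma_addr_t)(trb - seg->trbs) * sizeof(*trb);
}

/*
 * Walk the TD from (start_seg, start_trb) to end_trb; return the
 * segment whose covered DMA span contains suspect_dma, or NULL.
 */
static struct cdnsp_segment *trb_in_td(struct cdnsp_segment *start_seg,
                                       const struct cdnsp_trb *start_trb,
                                       const struct cdnsp_trb *end_trb,
                                       dma_addr_t suspect_dma)
{
        struct cdnsp_segment *seg = start_seg;
        dma_addr_t low = trb_dma(seg, start_trb);

        do {
                int last_seg = trb_in_seg(seg, end_trb);
                dma_addr_t high = last_seg ?
                        trb_dma(seg, end_trb) :
                        trb_dma(seg, &seg->trbs[TRBS_PER_SEGMENT - 1]);

                if (suspect_dma >= low && suspect_dma <= high)
                        return seg;
                if (last_seg)
                        return NULL;    /* TD ends before the address */

                seg = seg->next;
                low = seg->dma;
        } while (seg != start_seg);

        return NULL;                    /* wrapped without a match */
}

A NULL result is what drives the skip logic described in the 1299-1300 comment: the event belongs to a TD the driver has not reached yet, so the intervening TDs are completed as short transfers on a later pass.
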
1629 struct cdnsp_ring *ep_ring,
1648 if (cdnsp_room_on_ring(pdev, ep_ring, num_trbs))
1653 num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
1654 if (cdnsp_ring_expansion(pdev, ep_ring, num_trbs_needed,
1661 while (cdnsp_trb_is_link(ep_ring->enqueue)) {
1662 ep_ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN);
1665 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
1668 if (cdnsp_link_trb_toggles_cycle(ep_ring->enqueue))
1669 ep_ring->cycle_state ^= 1;
1670 ep_ring->enq_seg = ep_ring->enq_seg->next;
1671 ep_ring->enqueue = ep_ring->enq_seg->trbs;
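
In 1648-1671, cdnsp_prepare_ring() makes room before anything is queued: if the ring lacks space for the whole TD it is expanded (1653-1654), and any link TRB sitting at the enqueue position is consumed up front: chained into the TD, handed to hardware by toggling its cycle bit, and, when it is the segment link that wraps to the ring start, made to flip the producer's cycle state. A sketch of that loop; the bit positions match the xHCI layout but are otherwise illustrative:

#include <stdint.h>

#define TRB_CYCLE       (1u << 0)
#define LINK_TOGGLE     (1u << 1)
#define TRB_CHAIN       (1u << 4)

#define TRBS_PER_SEGMENT 256

struct cdnsp_trb {
        uint32_t control;
        unsigned int is_link;
};

struct cdnsp_segment {
        struct cdnsp_trb trbs[TRBS_PER_SEGMENT];
        struct cdnsp_segment *next;
};

struct cdnsp_ring {
        struct cdnsp_segment *enq_seg;
        struct cdnsp_trb *enqueue;
        unsigned int cycle_state;       /* producer's current cycle bit */
};

static void step_past_link_trbs(struct cdnsp_ring *ring)
{
        while (ring->enqueue->is_link) {
                struct cdnsp_trb *link = ring->enqueue;

                link->control |= TRB_CHAIN;     /* keep the TD chained */
                link->control ^= TRB_CYCLE;     /* hand the link TRB over */

                /* The wrap-around link TRB flips the producer cycle. */
                if (link->control & LINK_TOGGLE)
                        ring->cycle_state ^= 1;

                ring->enq_seg = ring->enq_seg->next;
                ring->enqueue = ring->enq_seg->trbs;
        }
}

Checking for room first (1648) is what lets the queuing functions below write a TD's TRBs back to back.
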
1680 struct cdnsp_ring *ep_ring;
1683 ep_ring = cdnsp_get_transfer_ring(pdev, preq->pep,
1685 if (!ep_ring)
1688 ret = cdnsp_prepare_ring(pdev, ep_ring,
1698 list_add_tail(&preq->td.td_list, &ep_ring->td_list);
1699 ep_ring->num_tds++;
1702 preq->td.start_seg = ep_ring->enq_seg;
1703 preq->td.first_trb = ep_ring->enqueue;
2049 struct cdnsp_ring *ep_ring;
2054 ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
2055 if (!ep_ring)
2088 cdnsp_queue_trb(pdev, ep_ring, true,
2091 field | ep_ring->cycle_state |
2101 cdnsp_queue_trb(pdev, ep_ring, true,
2104 field | ep_ring->cycle_state |
2113 preq->td.last_trb = ep_ring->enqueue;
2117 field = ep_ring->cycle_state;
2119 field = (ep_ring->cycle_state ^ 1);
2131 cdnsp_queue_trb(pdev, ep_ring, false, 0, 0, TRB_INTR_TARGET(0),
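
The 2049-2131 cluster queues a control transfer as a single TD: a setup TRB (2088), an optional data-stage TRB (2101), and a status TRB (2131), each written through cdnsp_queue_trb() with an explicit cycle bit. Note that preq->td.last_trb is captured at 2113, just before the status TRB is queued, so it ends up pointing at the status TRB; 2119 passes the inverted cycle, and the cycle bit is what decides whether hardware may consume a TRB at all. The producer side of such a call reduces to writing the TRB's four 32-bit words and advancing enqueue; a stand-in sketch:

#include <stdint.h>

struct cdnsp_trb { uint32_t field[4]; };

struct cdnsp_ring {
        struct cdnsp_trb *enqueue;
        unsigned int cycle_state;
};

/*
 * Write one TRB at the enqueue position. Ownership is governed solely
 * by the cycle bit inside 'control', so callers pass either the ring's
 * cycle state or its inverse, exactly as the listing shows.
 */
static void queue_trb(struct cdnsp_ring *ring, uint32_t field1,
                      uint32_t field2, uint32_t status, uint32_t control)
{
        struct cdnsp_trb *trb = ring->enqueue;

        trb->field[0] = field1;
        trb->field[1] = field2;
        trb->field[2] = status;
        trb->field[3] = control;        /* the cycle bit lives here */

        ring->enqueue++;        /* the driver also steps over link TRBs */
}
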
2229 struct cdnsp_ring *ep_ring;
2238 ep_ring = preq->pep->ring;
2258 start_trb = &ep_ring->enqueue->generic;
2259 start_cycle = ep_ring->cycle_state;
2305 field = TRB_TYPE(TRB_NORMAL) | ep_ring->cycle_state;
2321 preq->td.last_trb = ep_ring->enqueue;
2325 cdnsp_queue_trb(pdev, ep_ring, more_trbs_coming,
2364 ep_ring->num_tds--;
2373 preq->td.last_trb = ep_ring->enqueue;
2375 cdnsp_td_to_noop(pdev, ep_ring, &preq->td, true);
2378 ep_ring->enqueue = preq->td.first_trb;
2379 ep_ring->enq_seg = preq->td.start_seg;
2380 ep_ring->cycle_state = start_cycle;
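
The closing lines (2364-2380) are an error path: if queuing fails partway through a TD, the TRBs already written are converted to no-ops via cdnsp_td_to_noop() (2375) and the producer state is rewound to the snapshot taken at 2258-2259. A compact model of that save-and-rewind discipline, restricted to a single-segment TD for brevity (the driver also restores enq_seg, line 2379):

struct cdnsp_trb { unsigned int is_noop; };

struct cdnsp_ring {
        struct cdnsp_trb *enqueue;
        unsigned int cycle_state;
        unsigned int num_tds;
};

struct td_start {                       /* snapshot taken before queuing */
        struct cdnsp_trb *trb;          /* like start_trb at 2258 */
        unsigned int cycle;             /* like start_cycle at 2259 */
};

static void save_start(const struct cdnsp_ring *ring, struct td_start *s)
{
        s->trb = ring->enqueue;
        s->cycle = ring->cycle_state;
}

static void rewind_td(struct cdnsp_ring *ring, const struct td_start *s)
{
        struct cdnsp_trb *trb;

        /* Neutralize everything written for this TD... */
        for (trb = s->trb; trb != ring->enqueue; trb++)
                trb->is_noop = 1;       /* real code: cdnsp_td_to_noop() */

        /* ...then restore the producer as if it had never been queued. */
        ring->num_tds--;
        ring->enqueue = s->trb;
        ring->cycle_state = s->cycle;
}

Rewinding the cycle state matters because prepare-time link-TRB handling (1661-1671) may have toggled it while the TD was being written.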