Lines matching refs: td (drivers/usb/host/xhci-ring.c)
111 static bool last_td_in_urb(struct xhci_td *td) in last_td_in_urb() argument
113 struct urb_priv *urb_priv = td->urb->hcpriv; in last_td_in_urb()
289 static struct xhci_segment *trb_in_td(struct xhci_td *td, dma_addr_t suspect_dma) in trb_in_td() argument
296 start_dma = xhci_trb_virt_to_dma(td->start_seg, td->start_trb); in trb_in_td()
297 cur_seg = td->start_seg; in trb_in_td()
306 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb); in trb_in_td()
331 } while (cur_seg != td->start_seg); in trb_in_td()
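
trb_in_td() answers the recurring question in this file: does a given event's DMA address fall inside this TD? It walks the segments the TD spans, comparing suspect_dma against the DMA range each segment contributes. Below is a minimal standalone sketch of that walk with simplified stand-in types (seg, td_span) and illustrative constants; edge cases the real function handles, such as a TD wrapping around a single segment, are elided.

#include <stddef.h>
#include <stdint.h>

#define TRBS_PER_SEGMENT 256
#define TRB_SIZE 16

struct seg {
	uint64_t dma;        /* DMA address of the segment's first TRB */
	struct seg *next;    /* segments form a circular list */
};

struct td_span {
	struct seg *start_seg;
	uint64_t start_dma;  /* DMA address of the TD's first TRB */
	uint64_t end_dma;    /* DMA address of the TD's last TRB */
};

/* Return the segment containing suspect if it lies between the TD's first
 * and last TRB, else NULL.  Mirrors the do/while loop in trb_in_td(). */
static struct seg *td_find_seg(const struct td_span *td, uint64_t suspect)
{
	struct seg *cur = td->start_seg;
	uint64_t lo = td->start_dma;

	do {
		uint64_t seg_end = cur->dma + (TRBS_PER_SEGMENT - 1) * TRB_SIZE;
		int td_ends_here = td->end_dma >= cur->dma && td->end_dma <= seg_end;
		uint64_t hi = td_ends_here ? td->end_dma : seg_end;

		if (suspect >= lo && suspect <= hi)
			return cur;
		if (td_ends_here)
			return NULL;    /* walked past the TD's last TRB */

		cur = cur->next;
		lo = cur->dma;          /* next segment starts at its base */
	} while (cur != td->start_seg);

	return NULL;
}
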
690 unsigned int stream_id, struct xhci_td *td) in xhci_move_dequeue_past_td() argument
717 new_cycle = le32_to_cpu(td->end_trb->generic.field[3]) & TRB_CYCLE; in xhci_move_dequeue_past_td()
731 if (new_deq == td->end_trb) in xhci_move_dequeue_past_td()
800 static void td_to_noop(struct xhci_td *td, bool flip_cycle) in td_to_noop() argument
802 struct xhci_segment *seg = td->start_seg; in td_to_noop()
803 union xhci_trb *trb = td->start_trb; in td_to_noop()
809 if (flip_cycle && trb != td->start_trb && trb != td->end_trb) in td_to_noop()
812 if (trb == td->end_trb) in td_to_noop()
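
When a TD is cancelled, td_to_noop() rewrites each of its TRBs as a transfer-ring No-Op so the controller skips them; with flip_cycle, the interior TRBs also get their cycle bit inverted so hardware parked mid-TD sees them as not yet enqueued. A simplified sketch follows (a contiguous span is assumed; the real loop hops link TRBs via next_trb()):

#include <stdbool.h>
#include <stdint.h>

#define TRB_CYCLE        (1u << 0)
#define TRB_TYPE_BITMASK (0x3fu << 10)
#define TRB_TYPE(t)      ((uint32_t)(t) << 10)
#define TRB_TR_NOOP      8          /* transfer-ring No-Op TRB type */

struct trb { uint32_t field[4]; };

static void span_to_noop(struct trb *start, struct trb *end, bool flip_cycle)
{
	for (struct trb *trb = start; ; trb++) {
		uint32_t ctrl = trb->field[3];

		/* interior TRBs only: first and last keep their cycle bit */
		if (flip_cycle && trb != start && trb != end)
			ctrl ^= TRB_CYCLE;

		ctrl = (ctrl & ~TRB_TYPE_BITMASK) | TRB_TYPE(TRB_TR_NOOP);
		trb->field[3] = ctrl;

		if (trb == end)
			break;
	}
}
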
840 struct xhci_ring *ring, struct xhci_td *td) in xhci_unmap_td_bounce_buffer() argument
843 struct xhci_segment *seg = td->bounce_seg; in xhci_unmap_td_bounce_buffer()
844 struct urb *urb = td->urb; in xhci_unmap_td_bounce_buffer()
873 static void xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, in xhci_td_cleanup() argument
879 urb = td->urb; in xhci_td_cleanup()
882 xhci_unmap_td_bounce_buffer(xhci, ep_ring, td); in xhci_td_cleanup()
896 if (!list_empty(&td->td_list)) in xhci_td_cleanup()
897 list_del_init(&td->td_list); in xhci_td_cleanup()
899 if (!list_empty(&td->cancelled_td_list)) in xhci_td_cleanup()
900 list_del_init(&td->cancelled_td_list); in xhci_td_cleanup()
904 if (last_td_in_urb(td)) { in xhci_td_cleanup()
915 xhci_giveback_urb_in_irq(xhci, td, status); in xhci_td_cleanup()
920 static void xhci_dequeue_td(struct xhci_hcd *xhci, struct xhci_td *td, struct xhci_ring *ring, in xhci_dequeue_td() argument
923 ring->dequeue = td->end_trb; in xhci_dequeue_td()
924 ring->deq_seg = td->end_seg; in xhci_dequeue_td()
927 xhci_td_cleanup(xhci, td, ring, status); in xhci_dequeue_td()
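
xhci_dequeue_td() moves the ring's software dequeue pointer onto the TD's final TRB, steps once past it, then gives the TD back via xhci_td_cleanup(). The step has to cross segment boundaries, since the last slot of every segment is a link TRB. A minimal sketch of that advance (stand-in types; the kernel's inc_deq() carries additional bookkeeping):

#include <stdint.h>

#define TRBS_PER_SEGMENT 256

struct trb { uint32_t field[4]; };

struct segment {
	struct trb trbs[TRBS_PER_SEGMENT];   /* last slot is a link TRB */
	struct segment *next;
};

struct ring {
	struct segment *deq_seg;
	struct trb *dequeue;
};

static void ring_inc_deq(struct ring *ring)
{
	ring->dequeue++;
	/* reached the link TRB: follow it to the next segment's first TRB */
	if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
	}
}
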
934 struct xhci_td *td, *tmp_td; in xhci_giveback_invalidated_tds() local
936 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, in xhci_giveback_invalidated_tds()
939 ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb); in xhci_giveback_invalidated_tds()
941 if (td->cancel_status == TD_CLEARED) { in xhci_giveback_invalidated_tds()
943 __func__, td->urb); in xhci_giveback_invalidated_tds()
944 xhci_td_cleanup(ep->xhci, td, ring, td->status); in xhci_giveback_invalidated_tds()
947 __func__, td->urb, td->cancel_status); in xhci_giveback_invalidated_tds()
980 struct xhci_td *td, in xhci_handle_halted_endpoint() argument
996 if (td && list_empty(&td->cancelled_td_list)) { in xhci_handle_halted_endpoint()
997 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); in xhci_handle_halted_endpoint()
998 td->cancel_status = TD_HALTED; in xhci_handle_halted_endpoint()
1031 struct xhci_td *td = NULL; in xhci_invalidate_cancelled_tds() local
1048 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) { in xhci_invalidate_cancelled_tds()
1052 td->start_seg, td->start_trb), in xhci_invalidate_cancelled_tds()
1053 td->urb->stream_id, td->urb); in xhci_invalidate_cancelled_tds()
1054 list_del_init(&td->td_list); in xhci_invalidate_cancelled_tds()
1055 ring = xhci_urb_to_transfer_ring(xhci, td->urb); in xhci_invalidate_cancelled_tds()
1058 td->urb, td->urb->stream_id); in xhci_invalidate_cancelled_tds()
1068 td->urb->stream_id); in xhci_invalidate_cancelled_tds()
1071 if (td->cancel_status == TD_HALTED || trb_in_td(td, hw_deq)) { in xhci_invalidate_cancelled_tds()
1072 switch (td->cancel_status) { in xhci_invalidate_cancelled_tds()
1080 if (cached_td->urb->stream_id != td->urb->stream_id) { in xhci_invalidate_cancelled_tds()
1084 td->urb->stream_id, td->urb); in xhci_invalidate_cancelled_tds()
1085 td->cancel_status = TD_CLEARING_CACHE_DEFERRED; in xhci_invalidate_cancelled_tds()
1092 td->urb, cached_td->urb, in xhci_invalidate_cancelled_tds()
1093 td->urb->stream_id); in xhci_invalidate_cancelled_tds()
1097 td_to_noop(td, false); in xhci_invalidate_cancelled_tds()
1098 td->cancel_status = TD_CLEARING_CACHE; in xhci_invalidate_cancelled_tds()
1099 cached_td = td; in xhci_invalidate_cancelled_tds()
1103 td_to_noop(td, false); in xhci_invalidate_cancelled_tds()
1104 td->cancel_status = TD_CLEARED; in xhci_invalidate_cancelled_tds()
1117 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) { in xhci_invalidate_cancelled_tds()
1123 if (td->cancel_status != TD_CLEARING_CACHE && in xhci_invalidate_cancelled_tds()
1124 td->cancel_status != TD_CLEARING_CACHE_DEFERRED) in xhci_invalidate_cancelled_tds()
1127 td->urb); in xhci_invalidate_cancelled_tds()
1128 td_to_noop(td, false); in xhci_invalidate_cancelled_tds()
1129 td->cancel_status = TD_CLEARED; in xhci_invalidate_cancelled_tds()
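
Taken together, lines 941-1129 trace a small state machine on td->cancel_status: a cancelled TD becomes TD_HALTED if the endpoint halted on it, TD_CLEARING_CACHE once a Set TR Dequeue command is queued to move the controller past it, TD_CLEARING_CACHE_DEFERRED when another stream's Set TR Dequeue must complete first, and TD_CLEARED when it is safe to give back (xhci_handle_cmd_set_deq, lines 1516-1526, performs that last transition). A sketch of the states; the driver's actual enum lives in xhci.h, where it also starts from a TD_DIRTY value:

enum cancel_status {                 /* illustrative layout; see xhci.h */
	TD_DIRTY = 0,                /* cancelled, still untouched on the ring */
	TD_HALTED,                   /* endpoint halted on this TD */
	TD_CLEARING_CACHE,           /* Set TR Dequeue issued to skip it */
	TD_CLEARING_CACHE_DEFERRED,  /* waiting on another stream's command */
	TD_CLEARED,                  /* controller is past it; safe to give back */
};
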
1154 struct xhci_td *td; in find_halted_td() local
1160 td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list); in find_halted_td()
1161 if (trb_in_td(td, hw_deq)) in find_halted_td()
1162 return td; in find_halted_td()
1183 struct xhci_td *td = NULL; in xhci_handle_cmd_stop_ep() local
1233 td = find_halted_td(ep); in xhci_handle_cmd_stop_ep()
1234 if (td) in xhci_handle_cmd_stop_ep()
1235 td->status = -EPROTO; in xhci_handle_cmd_stop_ep()
1238 err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type); in xhci_handle_cmd_stop_ep()
1420 struct xhci_td *td, *tmp_td; in xhci_handle_cmd_set_deq() local
1516 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, in xhci_handle_cmd_set_deq()
1518 ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb); in xhci_handle_cmd_set_deq()
1519 if (td->cancel_status == TD_CLEARING_CACHE) { in xhci_handle_cmd_set_deq()
1520 td->cancel_status = TD_CLEARED; in xhci_handle_cmd_set_deq()
1522 __func__, td->urb); in xhci_handle_cmd_set_deq()
1523 xhci_td_cleanup(ep->xhci, td, ep_ring, td->status); in xhci_handle_cmd_set_deq()
1526 __func__, td->urb, td->cancel_status); in xhci_handle_cmd_set_deq()
2157 static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td, in xhci_clear_hub_tt_buffer() argument
2164 if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) && in xhci_clear_hub_tt_buffer()
2165 (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) && in xhci_clear_hub_tt_buffer()
2168 td->urb->ep->hcpriv = td->urb->dev; in xhci_clear_hub_tt_buffer()
2169 if (usb_hub_clear_tt_buffer(td->urb)) in xhci_clear_hub_tt_buffer()
2219 struct xhci_ring *ep_ring, struct xhci_td *td, in finish_td() argument
2257 !list_empty(&td->cancelled_td_list)) { in finish_td()
2260 td->start_seg, td->start_trb)); in finish_td()
2267 xhci_clear_hub_tt_buffer(xhci, td, ep); in finish_td()
2268 xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET); in finish_td()
2282 xhci_clear_hub_tt_buffer(xhci, td, ep); in finish_td()
2284 xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET); in finish_td()
2291 xhci_dequeue_td(xhci, td, ep_ring, td->status); in finish_td()
2295 static u32 sum_trb_lengths(struct xhci_td *td, union xhci_trb *stop_trb) in sum_trb_lengths() argument
2298 union xhci_trb *trb = td->start_trb; in sum_trb_lengths()
2299 struct xhci_segment *seg = td->start_seg; in sum_trb_lengths()
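
sum_trb_lengths() totals the programmed length of every data TRB from the TD's first TRB up to (but not including) a stop TRB; the transfer paths use it when an event lands mid-TD. A sketch with the segment hop elided (the real loop advances via next_trb(); the TRB transfer length occupies the low 17 bits of field[2]):

#include <stdint.h>

#define TRB_LEN_MASK   0x1ffffu   /* transfer length: field[2] bits 16:0 */
#define TRB_TYPE_SHIFT 10
#define TRB_TYPE_MASK  0x3fu
#define TRB_LINK       6
#define TRB_TR_NOOP    8

struct trb { uint32_t field[4]; };

static unsigned trb_type(const struct trb *trb)
{
	return (trb->field[3] >> TRB_TYPE_SHIFT) & TRB_TYPE_MASK;
}

/* Sum lengths over [start, stop); link and no-op TRBs carry no data. */
static uint32_t span_trb_lengths(const struct trb *start, const struct trb *stop)
{
	uint32_t sum = 0;

	for (const struct trb *trb = start; trb != stop; trb++)
		if (trb_type(trb) != TRB_LINK && trb_type(trb) != TRB_TR_NOOP)
			sum += trb->field[2] & TRB_LEN_MASK;
	return sum;
}
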
2312 struct xhci_ring *ep_ring, struct xhci_td *td, in process_ctrl_td() argument
2323 requested = td->urb->transfer_buffer_length; in process_ctrl_td()
2331 td->status = -ESHUTDOWN; in process_ctrl_td()
2334 td->status = 0; in process_ctrl_td()
2337 td->status = 0; in process_ctrl_td()
2341 td->urb->actual_length = remaining; in process_ctrl_td()
2348 td->urb->actual_length = 0; in process_ctrl_td()
2352 td->urb->actual_length = requested - remaining; in process_ctrl_td()
2355 td->urb->actual_length = requested; in process_ctrl_td()
2373 td->urb->actual_length = requested - remaining; in process_ctrl_td()
2374 else if (!td->urb_length_set) in process_ctrl_td()
2375 td->urb->actual_length = 0; in process_ctrl_td()
2389 td->urb_length_set = true; in process_ctrl_td()
2390 td->urb->actual_length = requested - remaining; in process_ctrl_td()
2396 if (!td->urb_length_set) in process_ctrl_td()
2397 td->urb->actual_length = requested; in process_ctrl_td()
2400 finish_td(xhci, ep, ep_ring, td, trb_comp_code); in process_ctrl_td()
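
The actual_length arithmetic in process_ctrl_td() derives from one rule: for most completion codes the event TRB's 24-bit transfer-length field reports bytes *not* transferred, so a completed or short data stage yields requested - remaining. (Stopped - Short Packet events instead report bytes transferred, which is why line 2341 assigns remaining directly.) A one-line sketch of the common case:

#include <stdint.h>

#define EVT_LEN_MASK 0xffffffu   /* event field[2] bits 23:0: bytes remaining */

static uint32_t ctrl_actual_length(uint32_t requested, uint32_t event_field2)
{
	uint32_t remaining = event_field2 & EVT_LEN_MASK;
	return requested - remaining;
}
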
2407 struct xhci_ring *ep_ring, struct xhci_td *td, in process_isoc_td() argument
2419 urb_priv = td->urb->hcpriv; in process_isoc_td()
2421 frame = &td->urb->iso_frame_desc[idx]; in process_isoc_td()
2425 short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ? in process_isoc_td()
2432 if (td->error_mid_td) in process_isoc_td()
2453 if (ep_trb != td->end_trb) in process_isoc_td()
2454 td->error_mid_td = true; in process_isoc_td()
2459 if (ep_trb != td->end_trb) in process_isoc_td()
2460 td->error_mid_td = true; in process_isoc_td()
2469 if (ep_trb != td->end_trb) in process_isoc_td()
2470 td->error_mid_td = true; in process_isoc_td()
2492 if (td->urb_length_set) in process_isoc_td()
2496 frame->actual_length = sum_trb_lengths(td, ep_trb) + in process_isoc_td()
2501 td->urb->actual_length += frame->actual_length; in process_isoc_td()
2505 if (td->error_mid_td && ep_trb != td->end_trb) { in process_isoc_td()
2507 td->urb_length_set = true; in process_isoc_td()
2510 finish_td(xhci, ep, ep_ring, td, trb_comp_code); in process_isoc_td()
2513 static void skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, in skip_isoc_td() argument
2520 urb_priv = td->urb->hcpriv; in skip_isoc_td()
2522 frame = &td->urb->iso_frame_desc[idx]; in skip_isoc_td()
2530 xhci_dequeue_td(xhci, td, ep->ring, status); in skip_isoc_td()
2537 struct xhci_ring *ep_ring, struct xhci_td *td, in process_bulk_intr_td() argument
2548 requested = td->urb->transfer_buffer_length; in process_bulk_intr_td()
2554 if (ep_trb != td->end_trb || remaining) { in process_bulk_intr_td()
2557 td->urb->ep->desc.bEndpointAddress, in process_bulk_intr_td()
2560 td->status = 0; in process_bulk_intr_td()
2563 td->status = 0; in process_bulk_intr_td()
2566 td->urb->actual_length = remaining; in process_bulk_intr_td()
2570 td->urb->actual_length = sum_trb_lengths(td, ep_trb); in process_bulk_intr_td()
2578 td->status = 0; in process_bulk_intr_td()
2580 xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET); in process_bulk_intr_td()
2587 if (ep_trb == td->end_trb) in process_bulk_intr_td()
2588 td->urb->actual_length = requested - remaining; in process_bulk_intr_td()
2590 td->urb->actual_length = in process_bulk_intr_td()
2591 sum_trb_lengths(td, ep_trb) + in process_bulk_intr_td()
2597 td->urb->actual_length = 0; in process_bulk_intr_td()
2600 finish_td(xhci, ep, ep_ring, td, trb_comp_code); in process_bulk_intr_td()
2659 struct xhci_td *td = NULL; in handle_tx_event() local
2831 td = list_first_entry_or_null(&ep_ring->td_list, struct xhci_td, td_list); in handle_tx_event()
2833 if (td && td->error_mid_td && !trb_in_td(td, ep_trb_dma)) { in handle_tx_event()
2835 xhci_dequeue_td(xhci, td, ep_ring, td->status); in handle_tx_event()
2862 td = list_first_entry(&ep_ring->td_list, struct xhci_td, in handle_tx_event()
2866 ep_seg = trb_in_td(td, ep_trb_dma); in handle_tx_event()
2870 if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { in handle_tx_event()
2875 skip_isoc_td(xhci, td, ep, status); in handle_tx_event()
2895 td = NULL; in handle_tx_event()
2964 td->status = status; in handle_tx_event()
2967 if (usb_endpoint_xfer_control(&td->urb->ep->desc)) in handle_tx_event()
2968 process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event); in handle_tx_event()
2969 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) in handle_tx_event()
2970 process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event); in handle_tx_event()
2972 process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event); in handle_tx_event()
2977 xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET); in handle_tx_event()
2984 (unsigned long long)xhci_trb_virt_to_dma(td->start_seg, td->start_trb), in handle_tx_event()
2985 (unsigned long long)xhci_trb_virt_to_dma(td->end_seg, td->end_trb)); in handle_tx_event()
3354 struct xhci_td *td; in prepare_transfer() local
3372 td = &urb_priv->td[td_index]; in prepare_transfer()
3374 INIT_LIST_HEAD(&td->td_list); in prepare_transfer()
3375 INIT_LIST_HEAD(&td->cancelled_td_list); in prepare_transfer()
3383 td->urb = urb; in prepare_transfer()
3385 list_add_tail(&td->td_list, &ep_ring->td_list); in prepare_transfer()
3386 td->start_seg = ep_ring->enq_seg; in prepare_transfer()
3387 td->start_trb = ep_ring->enqueue; in prepare_transfer()
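
prepare_transfer() records where a TD begins before any TRB is written; the queueing paths below (xhci_queue_bulk_tx, xhci_queue_ctrl_tx, xhci_queue_isoc_tx) record end_trb/end_seg at the enqueue position of the TD's final TRB, just before that TRB is written and enqueue advances. A sketch of that bookkeeping with stand-in types (the real prepare_transfer() also checks ring room and endpoint state):

struct trb;
struct segment;

struct ring {
	struct segment *enq_seg;
	struct trb *enqueue;
};

struct td_pos {
	struct segment *start_seg, *end_seg;
	struct trb *start_trb, *end_trb;
};

/* Called before the TD's first TRB is queued. */
static void td_mark_start(struct td_pos *td, const struct ring *ring)
{
	td->start_seg = ring->enq_seg;
	td->start_trb = ring->enqueue;
}

/* Called just before the TD's last TRB is queued, while enqueue still
 * points at the slot that TRB will occupy. */
static void td_mark_end(struct td_pos *td, const struct ring *ring)
{
	td->end_seg = ring->enq_seg;
	td->end_trb = ring->enqueue;
}
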
3634 struct xhci_td *td; in xhci_queue_bulk_tx() local
3676 td = &urb_priv->td[0]; in xhci_queue_bulk_tx()
3718 td->bounce_seg = ring->enq_seg; in xhci_queue_bulk_tx()
3726 td->end_trb = ring->enqueue; in xhci_queue_bulk_tx()
3727 td->end_seg = ring->enq_seg; in xhci_queue_bulk_tx()
3775 urb_priv->td[1].end_trb = ring->enqueue; in xhci_queue_bulk_tx()
3776 urb_priv->td[1].end_seg = ring->enq_seg; in xhci_queue_bulk_tx()
3799 struct xhci_td *td; in xhci_queue_ctrl_tx() local
3842 td = &urb_priv->td[0]; in xhci_queue_ctrl_tx()
3914 td->end_trb = ep_ring->enqueue; in xhci_queue_ctrl_tx()
3915 td->end_seg = ep_ring->enq_seg; in xhci_queue_ctrl_tx()
4101 struct xhci_td *td; in xhci_queue_isoc_tx() local
4158 td = &urb_priv->td[i]; in xhci_queue_isoc_tx()
4200 td->end_trb = ep_ring->enqueue; in xhci_queue_isoc_tx()
4201 td->end_seg = ep_ring->enq_seg; in xhci_queue_isoc_tx()
4262 list_del_init(&urb_priv->td[i].td_list); in xhci_queue_isoc_tx()
4269 urb_priv->td[0].end_trb = ep_ring->enqueue; in xhci_queue_isoc_tx()
4271 td_to_noop(&urb_priv->td[0], true); in xhci_queue_isoc_tx()
4274 ep_ring->enqueue = urb_priv->td[0].start_trb; in xhci_queue_isoc_tx()
4275 ep_ring->enq_seg = urb_priv->td[0].start_seg; in xhci_queue_isoc_tx()
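
The error path at the end of xhci_queue_isoc_tx() (lines 4262-4275) rolls back a partially queued URB: every TD already linked is unlinked, the first TD is temporarily stretched to cover everything written (td[0].end_trb = enqueue), td_to_noop(..., true) neutralizes that whole span with flipped interior cycle bits, and enqueue/enq_seg rewind to td[0]'s start. A sketch against hypothetical helpers (unlink_td and span_to_noop stand in for list_del_init() and td_to_noop()):

#include <stdbool.h>

struct trb;
struct segment;

struct ring {
	struct segment *enq_seg;
	struct trb *enqueue;
};

struct td_pos {
	struct segment *start_seg, *end_seg;
	struct trb *start_trb, *end_trb;
};

void unlink_td(struct td_pos *td);                                /* hypothetical */
void span_to_noop(struct trb *start, struct trb *end, bool flip); /* hypothetical */

/* Roll back frames [0, queued) of a partially enqueued isoc URB. */
static void rollback_partial_isoc(struct ring *ring, struct td_pos *tds, int queued)
{
	for (int i = queued - 1; i >= 0; i--)
		unlink_td(&tds[i]);

	/* stretch td[0] over everything written so one no-op pass covers it */
	tds[0].end_trb = ring->enqueue;
	span_to_noop(tds[0].start_trb, tds[0].end_trb, /*flip=*/true);

	/* rewind software enqueue to where the URB began; the real driver
	 * also restores ring->cycle_state here */
	ring->enqueue = tds[0].start_trb;
	ring->enq_seg = tds[0].start_seg;
}
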