Lines Matching full:endpoint

135     struct endp_data endpoint[MAX_ENDPOINTS];  member
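
Nearly every match below reaches this array through EP2I(ep), with USBEP2I(ep) used where a struct USBEndpoint pointer is at hand (source line 412). The macro definitions themselves are not among the matched lines, so the following is only an illustrative sketch, assuming the conventional mapping that folds the endpoint address's IN bit (0x80) and 4-bit endpoint number into one flat array index; the real EP2I/I2EP definitions live elsewhere in redirect.c.

    /* Illustrative sketch only: fold a USB endpoint address (direction bit
     * 0x80 plus a 4-bit endpoint number) into an index for a flat array such
     * as dev->endpoint[].  Macro names carry a _SKETCH suffix to make clear
     * these are not the definitions from redirect.c. */
    #include <stdio.h>

    #define EP2I_SKETCH(ep_address) ((((ep_address) & 0x80) >> 3) | ((ep_address) & 0x0f))
    #define I2EP_SKETCH(i)          ((((i) & 0x10) << 3) | ((i) & 0x0f))

    int main(void)
    {
        /* OUT endpoint 2 and IN endpoint 2 land in different slots. */
        printf("EP 0x02 -> %d\n", EP2I_SKETCH(0x02));    /* 2    */
        printf("EP 0x82 -> %d\n", EP2I_SKETCH(0x82));    /* 18   */
        printf("index 18 -> 0x%02x\n", I2EP_SKETCH(18)); /* 0x82 */
        return 0;
    }
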
387 if (dev->endpoint[i].pending_async_packet) { in usbredir_cancel_packet()
388 assert(dev->endpoint[i].pending_async_packet == p); in usbredir_cancel_packet()
389 dev->endpoint[i].pending_async_packet = NULL; in usbredir_cancel_packet()
412 if (dev->endpoint[USBEP2I(ep)].bulk_receiving_started) { in usbredir_fill_already_in_flight_from_ep()
468 if (!dev->endpoint[EP2I(ep)].bufpq_dropping_packets && in bufp_alloc()
469 dev->endpoint[EP2I(ep)].bufpq_size > in bufp_alloc()
470 2 * dev->endpoint[EP2I(ep)].bufpq_target_size) { in bufp_alloc()
472 dev->endpoint[EP2I(ep)].bufpq_dropping_packets = 1; in bufp_alloc()
476 if (dev->endpoint[EP2I(ep)].bufpq_dropping_packets) { in bufp_alloc()
477 if (dev->endpoint[EP2I(ep)].bufpq_size > in bufp_alloc()
478 dev->endpoint[EP2I(ep)].bufpq_target_size) { in bufp_alloc()
482 dev->endpoint[EP2I(ep)].bufpq_dropping_packets = 0; in bufp_alloc()
491 QTAILQ_INSERT_TAIL(&dev->endpoint[EP2I(ep)].bufpq, bufp, next); in bufp_alloc()
492 dev->endpoint[EP2I(ep)].bufpq_size++; in bufp_alloc()
499 QTAILQ_REMOVE(&dev->endpoint[EP2I(ep)].bufpq, bufp, next); in bufp_free()
500 dev->endpoint[EP2I(ep)].bufpq_size--; in bufp_free()
509 QTAILQ_FOREACH_SAFE(buf, &dev->endpoint[EP2I(ep)].bufpq, next, buf_next) { in usbredir_free_bufpq()
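
The bufp_alloc matches at source lines 468-482 outline a hysteresis throttle on each endpoint's buffered-packet queue: once the queue grows past twice its target size the endpoint starts dropping incoming packets, and it keeps dropping until the queue has drained back to the target. A self-contained sketch of that policy, with struct and function names of our own choosing rather than the ones in redirect.c:

    /* Hysteresis throttle sketched from the bufp_alloc matches at source
     * lines 468-482: start dropping above 2x the target queue size, stop
     * dropping once the queue is back at or below the target. */
    #include <stdbool.h>
    #include <stdio.h>

    struct ep_queue_state {
        int  bufpq_size;
        int  bufpq_target_size;
        bool bufpq_dropping_packets;
    };

    /* Returns true if the newly arrived packet should be dropped. */
    static bool should_drop_packet(struct ep_queue_state *q)
    {
        if (!q->bufpq_dropping_packets &&
            q->bufpq_size > 2 * q->bufpq_target_size) {
            q->bufpq_dropping_packets = true;   /* queue ran away: start dropping */
        }
        if (q->bufpq_dropping_packets) {
            if (q->bufpq_size > q->bufpq_target_size) {
                return true;                    /* still draining: drop this one */
            }
            q->bufpq_dropping_packets = false;  /* drained back to target */
        }
        return false;
    }

    int main(void)
    {
        struct ep_queue_state q = { .bufpq_size = 25, .bufpq_target_size = 10 };
        printf("%d\n", should_drop_packet(&q)); /* 1: 25 > 2*10, dropping starts */
        q.bufpq_size = 10;
        printf("%d\n", should_drop_packet(&q)); /* 0: back at target, dropping stops */
        return 0;
    }
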
531 if (!dev->endpoint[EP2I(ep)].iso_started && in usbredir_handle_iso_data()
532 !dev->endpoint[EP2I(ep)].iso_error) { in usbredir_handle_iso_data()
534 .endpoint = ep, in usbredir_handle_iso_data()
539 pkts_per_sec = 8000 / dev->endpoint[EP2I(ep)].interval; in usbredir_handle_iso_data()
541 pkts_per_sec = 1000 / dev->endpoint[EP2I(ep)].interval; in usbredir_handle_iso_data()
544 dev->endpoint[EP2I(ep)].bufpq_target_size = (pkts_per_sec * 60) / 1000; in usbredir_handle_iso_data()
556 dev->endpoint[EP2I(ep)].bufpq_target_size, in usbredir_handle_iso_data()
572 dev->endpoint[EP2I(ep)].iso_started = 1; in usbredir_handle_iso_data()
573 dev->endpoint[EP2I(ep)].bufpq_prefilled = 0; in usbredir_handle_iso_data()
574 dev->endpoint[EP2I(ep)].bufpq_dropping_packets = 0; in usbredir_handle_iso_data()
580 if (dev->endpoint[EP2I(ep)].iso_started && in usbredir_handle_iso_data()
581 !dev->endpoint[EP2I(ep)].bufpq_prefilled) { in usbredir_handle_iso_data()
582 if (dev->endpoint[EP2I(ep)].bufpq_size < in usbredir_handle_iso_data()
583 dev->endpoint[EP2I(ep)].bufpq_target_size) { in usbredir_handle_iso_data()
586 dev->endpoint[EP2I(ep)].bufpq_prefilled = 1; in usbredir_handle_iso_data()
589 isop = QTAILQ_FIRST(&dev->endpoint[EP2I(ep)].bufpq); in usbredir_handle_iso_data()
592 ep, dev->endpoint[EP2I(ep)].iso_error); in usbredir_handle_iso_data()
594 dev->endpoint[EP2I(ep)].bufpq_prefilled = 0; in usbredir_handle_iso_data()
596 status = dev->endpoint[EP2I(ep)].iso_error; in usbredir_handle_iso_data()
597 dev->endpoint[EP2I(ep)].iso_error = 0; in usbredir_handle_iso_data()
602 isop->status, isop->len, dev->endpoint[EP2I(ep)].bufpq_size); in usbredir_handle_iso_data()
618 if (dev->endpoint[EP2I(ep)].iso_started) { in usbredir_handle_iso_data()
620 .endpoint = ep, in usbredir_handle_iso_data()
630 status = dev->endpoint[EP2I(ep)].iso_error; in usbredir_handle_iso_data()
631 dev->endpoint[EP2I(ep)].iso_error = 0; in usbredir_handle_iso_data()
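
The matches at source lines 539-544 size the iso buffer queue from the endpoint interval: packets per second come out as 8000/interval or 1000/interval (presumably selected by device speed, which the matched lines do not show), and bufpq_target_size is set to roughly 60 ms worth of packets. A worked sketch of that arithmetic, with the speed flag and helper name assumed for illustration:

    /* Worked example of the bufpq_target_size arithmetic shown at source
     * lines 539-544: buffer roughly 60 ms of iso traffic.  The high_speed
     * flag and function name are assumptions for illustration only. */
    #include <stdio.h>

    static int iso_target_size(int high_speed, int interval)
    {
        int pkts_per_sec = high_speed ? 8000 / interval   /* microframes/s */
                                      : 1000 / interval;  /* frames/s      */
        return (pkts_per_sec * 60) / 1000;                /* ~60 ms of packets */
    }

    int main(void)
    {
        /* High-speed endpoint polled every microframe: 8000 pkt/s -> 480 buffered. */
        printf("%d\n", iso_target_size(1, 1));  /* 480 */
        /* Full-speed endpoint polled every 4 frames: 250 pkt/s -> 15 buffered. */
        printf("%d\n", iso_target_size(0, 4));  /* 15 */
        return 0;
    }
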
641 .endpoint = ep in usbredir_stop_iso_stream()
643 if (dev->endpoint[EP2I(ep)].iso_started) { in usbredir_stop_iso_stream()
646 dev->endpoint[EP2I(ep)].iso_started = 0; in usbredir_stop_iso_stream()
648 dev->endpoint[EP2I(ep)].iso_error = 0; in usbredir_stop_iso_stream()
653 * The usb-host may poll the endpoint faster than our guest, resulting in lots
675 while ((bulkp = QTAILQ_FIRST(&dev->endpoint[EP2I(ep)].bufpq)) && in usbredir_buffered_bulk_in_complete_raw()
688 const int maxp = dev->endpoint[EP2I(ep)].max_packet_size; in usbredir_buffered_bulk_in_complete_ftdi()
693 while ((bulkp = QTAILQ_FIRST(&dev->endpoint[EP2I(ep)].bufpq)) && in usbredir_buffered_bulk_in_complete_ftdi()
734 /* Input bulk endpoint, buffered packet input */ in usbredir_handle_buffered_bulk_in_data()
735 if (!dev->endpoint[EP2I(ep)].bulk_receiving_started) { in usbredir_handle_buffered_bulk_in_data()
738 .endpoint = ep, in usbredir_handle_buffered_bulk_in_data()
743 bpt = 512 + dev->endpoint[EP2I(ep)].max_packet_size - 1; in usbredir_handle_buffered_bulk_in_data()
744 bpt /= dev->endpoint[EP2I(ep)].max_packet_size; in usbredir_handle_buffered_bulk_in_data()
745 bpt *= dev->endpoint[EP2I(ep)].max_packet_size; in usbredir_handle_buffered_bulk_in_data()
752 dev->endpoint[EP2I(ep)].bulk_receiving_started = 1; in usbredir_handle_buffered_bulk_in_data()
755 dev->endpoint[EP2I(ep)].bufpq_target_size = 5000; in usbredir_handle_buffered_bulk_in_data()
756 dev->endpoint[EP2I(ep)].bufpq_dropping_packets = 0; in usbredir_handle_buffered_bulk_in_data()
759 if (QTAILQ_EMPTY(&dev->endpoint[EP2I(ep)].bufpq)) { in usbredir_handle_buffered_bulk_in_data()
761 assert(dev->endpoint[EP2I(ep)].pending_async_packet == NULL); in usbredir_handle_buffered_bulk_in_data()
762 dev->endpoint[EP2I(ep)].pending_async_packet = p; in usbredir_handle_buffered_bulk_in_data()
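
The matches at source lines 743-745 round a nominal 512-byte bytes-per-transfer value up to a whole multiple of the endpoint's max_packet_size before buffered bulk receiving is started. A minimal sketch of that round-up idiom, with a helper name that is ours rather than redirect.c's:

    /* Sketch of the round-up idiom from source lines 743-745: bring the
     * nominal 512-byte bytes-per-transfer up to a multiple of the
     * endpoint's max_packet_size. */
    #include <stdio.h>

    static int round_up_bytes_per_transfer(int max_packet_size)
    {
        int bpt = 512 + max_packet_size - 1;
        bpt /= max_packet_size;
        bpt *= max_packet_size;
        return bpt;
    }

    int main(void)
    {
        printf("%d\n", round_up_bytes_per_transfer(64));   /* 512  */
        printf("%d\n", round_up_bytes_per_transfer(512));  /* 512  */
        printf("%d\n", round_up_bytes_per_transfer(1024)); /* 1024 */
        return 0;
    }
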
772 .endpoint = ep, in usbredir_stop_bulk_receiving()
775 if (dev->endpoint[EP2I(ep)].bulk_receiving_started) { in usbredir_stop_bulk_receiving()
778 dev->endpoint[EP2I(ep)].bulk_receiving_started = 0; in usbredir_stop_bulk_receiving()
788 const int maxp = dev->endpoint[EP2I(ep)].max_packet_size; in usbredir_handle_bulk_data()
795 if (dev->endpoint[EP2I(ep)].bulk_receiving_enabled) { in usbredir_handle_bulk_data()
801 assert(dev->endpoint[EP2I(ep)].pending_async_packet == NULL); in usbredir_handle_bulk_data()
803 dev->endpoint[EP2I(ep)].bulk_receiving_enabled = 0; in usbredir_handle_bulk_data()
809 bulk_packet.endpoint = ep; in usbredir_handle_bulk_data()
834 /* Input interrupt endpoint, buffered packet input */ in usbredir_handle_interrupt_in_data()
838 if (!dev->endpoint[EP2I(ep)].interrupt_started && in usbredir_handle_interrupt_in_data()
839 !dev->endpoint[EP2I(ep)].interrupt_error) { in usbredir_handle_interrupt_in_data()
841 .endpoint = ep, in usbredir_handle_interrupt_in_data()
848 dev->endpoint[EP2I(ep)].interrupt_started = 1; in usbredir_handle_interrupt_in_data()
851 dev->endpoint[EP2I(ep)].bufpq_target_size = 1000; in usbredir_handle_interrupt_in_data()
852 dev->endpoint[EP2I(ep)].bufpq_dropping_packets = 0; in usbredir_handle_interrupt_in_data()
857 QTAILQ_FOREACH(intp, &dev->endpoint[EP2I(ep)].bufpq, next) { in usbredir_handle_interrupt_in_data()
859 if (intp->len < dev->endpoint[EP2I(ep)].max_packet_size || in usbredir_handle_interrupt_in_data()
867 status = dev->endpoint[EP2I(ep)].interrupt_error; in usbredir_handle_interrupt_in_data()
868 dev->endpoint[EP2I(ep)].interrupt_error = 0; in usbredir_handle_interrupt_in_data()
881 QTAILQ_FOREACH(intp, &dev->endpoint[EP2I(ep)].bufpq, next) { in usbredir_handle_interrupt_in_data()
903 if (intp->len < dev->endpoint[EP2I(ep)].max_packet_size || in usbredir_handle_interrupt_in_data()
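
The interrupt-in matches at source lines 859 and 903 both test intp->len < max_packet_size, i.e. the usual USB short-packet rule: a buffered packet shorter than the endpoint's maximum packet size ends the transfer. The matched lines show only that half of each condition, so the sketch below illustrates just the short-packet rule, using the BSD sys/queue.h TAILQ as a stand-in for QEMU's QTAILQ; the surrounding packet-combining logic in redirect.c is not reproduced here.

    /* Sketch of the short-packet test visible at source lines 859 and 903:
     * a buffered interrupt transfer is considered complete once a queued
     * packet is shorter than the endpoint's max_packet_size. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/queue.h>

    struct buf_packet {
        int len;
        TAILQ_ENTRY(buf_packet) next;
    };
    TAILQ_HEAD(bufpq_head, buf_packet);

    static bool transfer_complete(struct bufpq_head *bufpq, int max_packet_size)
    {
        struct buf_packet *intp;

        TAILQ_FOREACH(intp, bufpq, next) {
            if (intp->len < max_packet_size) {
                return true;   /* short packet terminates the transfer */
            }
        }
        return false;          /* only full-sized packets buffered so far */
    }

    int main(void)
    {
        struct bufpq_head q = TAILQ_HEAD_INITIALIZER(q);
        struct buf_packet a = { .len = 64 }, b = { .len = 10 };

        TAILQ_INSERT_TAIL(&q, &a, next);
        printf("%d\n", transfer_complete(&q, 64)); /* 0 */
        TAILQ_INSERT_TAIL(&q, &b, next);
        printf("%d\n", transfer_complete(&q, 64)); /* 1 */
        return 0;
    }
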
918 * expect immediate completion for an interrupt endpoint, and handling this
931 interrupt_packet.endpoint = ep; in usbredir_handle_interrupt_out_data()
945 .endpoint = ep in usbredir_stop_interrupt_receiving()
947 if (dev->endpoint[EP2I(ep)].interrupt_started) { in usbredir_stop_interrupt_receiving()
951 dev->endpoint[EP2I(ep)].interrupt_started = 0; in usbredir_stop_interrupt_receiving()
953 dev->endpoint[EP2I(ep)].interrupt_error = 0; in usbredir_stop_interrupt_receiving()
967 switch (dev->endpoint[EP2I(ep)].type) { in usbredir_handle_data()
992 dev->endpoint[EP2I(ep)].type); in usbredir_handle_data()
1008 switch (dev->endpoint[i].type) { in usbredir_stop_ep()
1070 if (dev->endpoint[i].interface == interface) { in usbredir_set_interface()
1133 control_packet.endpoint = control_packet.requesttype & USB_DIR_IN; in usbredir_handle_control()
1416 memset(dev->endpoint, 0, sizeof(dev->endpoint)); in usbredir_init_endpoints()
1418 dev->endpoint[i].dev = dev; in usbredir_init_endpoints()
1419 QTAILQ_INIT(&dev->endpoint[i].bufpq); in usbredir_init_endpoints()
1555 dev->endpoint[i].bulk_receiving_enabled = 0; in usbredir_check_bulk_receiving()
1580 if (dev->endpoint[j].interface == in usbredir_check_bulk_receiving()
1582 dev->endpoint[j].type == USB_ENDPOINT_XFER_BULK && in usbredir_check_bulk_receiving()
1583 dev->endpoint[j].max_packet_size != 0) { in usbredir_check_bulk_receiving()
1584 dev->endpoint[j].bulk_receiving_enabled = 1; in usbredir_check_bulk_receiving()
1782 usb_ep->type = dev->endpoint[i].type; in usbredir_setup_usb_eps()
1783 usb_ep->ifnum = dev->endpoint[i].interface; in usbredir_setup_usb_eps()
1784 usb_ep->max_packet_size = dev->endpoint[i].max_packet_size; in usbredir_setup_usb_eps()
1785 usb_ep->max_streams = dev->endpoint[i].max_streams; in usbredir_setup_usb_eps()
1798 dev->endpoint[i].type = ep_info->type[i]; in usbredir_ep_info()
1799 dev->endpoint[i].interval = ep_info->interval[i]; in usbredir_ep_info()
1800 dev->endpoint[i].interface = ep_info->interface[i]; in usbredir_ep_info()
1803 dev->endpoint[i].max_packet_size = ep_info->max_packet_size[i]; in usbredir_ep_info()
1808 dev->endpoint[i].max_streams = ep_info->max_streams[i]; in usbredir_ep_info()
1811 switch (dev->endpoint[i].type) { in usbredir_ep_info()
1829 if (dev->endpoint[i].interval == 0) { in usbredir_ep_info()
1830 ERROR("Received 0 interval for isoc or irq endpoint\n"); in usbredir_ep_info()
1838 dev->endpoint[i].type, dev->endpoint[i].interface); in usbredir_ep_info()
1841 ERROR("Received invalid endpoint type\n"); in usbredir_ep_info()
1849 ERROR("Device no longer matches speed after endpoint info change, " in usbredir_ep_info()
1903 uint8_t ep = iso_stream_status->endpoint; in usbredir_iso_stream_status()
1908 if (!dev->dev.attached || !dev->endpoint[EP2I(ep)].iso_started) { in usbredir_iso_stream_status()
1912 dev->endpoint[EP2I(ep)].iso_error = iso_stream_status->status; in usbredir_iso_stream_status()
1915 dev->endpoint[EP2I(ep)].iso_started = 0; in usbredir_iso_stream_status()
1924 uint8_t ep = interrupt_receiving_status->endpoint; in usbredir_interrupt_receiving_status()
1929 if (!dev->dev.attached || !dev->endpoint[EP2I(ep)].interrupt_started) { in usbredir_interrupt_receiving_status()
1933 dev->endpoint[EP2I(ep)].interrupt_error = in usbredir_interrupt_receiving_status()
1937 dev->endpoint[EP2I(ep)].interrupt_started = 0; in usbredir_interrupt_receiving_status()
1964 uint8_t ep = bulk_receiving_status->endpoint; in usbredir_bulk_receiving_status()
1969 if (!dev->dev.attached || !dev->endpoint[EP2I(ep)].bulk_receiving_started) { in usbredir_bulk_receiving_status()
1975 dev->endpoint[EP2I(ep)].bulk_receiving_started = 0; in usbredir_bulk_receiving_status()
2042 uint8_t ep = bulk_packet->endpoint; in usbredir_bulk_packet()
2078 uint8_t ep = iso_packet->endpoint; in usbredir_iso_packet()
2083 if (dev->endpoint[EP2I(ep)].type != USB_ENDPOINT_XFER_ISOC) { in usbredir_iso_packet()
2084 ERROR("received iso packet for non iso endpoint %02X\n", ep); in usbredir_iso_packet()
2089 if (dev->endpoint[EP2I(ep)].iso_started == 0) { in usbredir_iso_packet()
2104 uint8_t ep = interrupt_packet->endpoint; in usbredir_interrupt_packet()
2109 if (dev->endpoint[EP2I(ep)].type != USB_ENDPOINT_XFER_INT) { in usbredir_interrupt_packet()
2110 ERROR("received int packet for non interrupt endpoint %02X\n", ep); in usbredir_interrupt_packet()
2116 if (dev->endpoint[EP2I(ep)].interrupt_started == 0) { in usbredir_interrupt_packet()
2144 uint8_t status, ep = buffered_bulk_packet->endpoint; in usbredir_buffered_bulk_packet()
2151 if (dev->endpoint[EP2I(ep)].type != USB_ENDPOINT_XFER_BULK) { in usbredir_buffered_bulk_packet()
2157 if (dev->endpoint[EP2I(ep)].bulk_receiving_started == 0) { in usbredir_buffered_bulk_packet()
2164 len = dev->endpoint[EP2I(ep)].max_packet_size; in usbredir_buffered_bulk_packet()
2181 if (dev->endpoint[EP2I(ep)].pending_async_packet) { in usbredir_buffered_bulk_packet()
2182 USBPacket *p = dev->endpoint[EP2I(ep)].pending_async_packet; in usbredir_buffered_bulk_packet()
2183 dev->endpoint[EP2I(ep)].pending_async_packet = NULL; in usbredir_buffered_bulk_packet()
2558 VMSTATE_STRUCT_ARRAY(endpoint, USBRedirDevice, MAX_ENDPOINTS, 1,