Lines Matching +full:imx21 +full:- +full:clock
1 // SPDX-License-Identifier: GPL-2.0+
3 * USB Host Controller Driver for IMX21
47 #include <linux/dma-mapping.h>
50 #include "imx21-hcd.h"
57 #define DEBUG_LOG_FRAME(imx21, etd, event) \
58 	(etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
60 #define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
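/*
 * When DEBUG is in effect, DEBUG_LOG_FRAME() snapshots the current frame
 * number register (USBH_FRMNUB) into the named etd_priv field, e.g.
 * DEBUG_LOG_FRAME(imx21, etd, activated) records etd->activated_frame.
 * The fallback expands to a no-op, so callers need no #ifdefs.
 */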
63 static const char hcd_name[] = "imx21-hcd";
65 static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
67 	return (struct imx21 *)hcd->hcd_priv;
75 static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
77 	void __iomem *reg = imx21->regs + offset;
81 static inline void clear_register_bits(struct imx21 *imx21,
84 	void __iomem *reg = imx21->regs + offset;
88 static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
90 	void __iomem *reg = imx21->regs + offset;
96 static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
98 	void __iomem *reg = imx21->regs + offset;
104 static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
106 	writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
109 static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
111 	return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
122 	return (s16)((s16)after - (s16)frame) < 0;
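/*
 * The hardware frame counter is 16 bits wide and wraps, so ordering is
 * decided by the sign of the signed 16-bit difference: two frame numbers
 * less than half the range apart compare correctly across the wrap,
 * e.g. frame 0x0001 counts as later than frame 0xfffe.
 */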
127 	struct imx21 *imx21 = hcd_to_imx21(hcd);
129 	return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
137 #include "imx21-dbg.c"
140 	struct imx21 *imx21, struct etd_priv *etd, int status);
141 static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
142 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
148 static int alloc_etd(struct imx21 *imx21)
151 	struct etd_priv *etd = imx21->etd;
154 		if (etd->alloc == 0) {
155 			memset(etd, 0, sizeof(imx21->etd[0]));
156 			etd->alloc = 1;
157 			debug_etd_allocated(imx21);
161 	return -1;
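/*
 * ETDs (Endpoint Transfer Descriptors) come from a small fixed pool in
 * the controller; allocation is a linear scan for the first entry with
 * alloc == 0, returning its index, or -1 when the pool is exhausted.
 */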
164 static void disactivate_etd(struct imx21 *imx21, int num)
167 	struct etd_priv *etd = &imx21->etd[num];
169 	writel(etd_mask, imx21->regs + USBH_ETDENCLR);
170 	clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
171 	writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
172 	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
174 	etd->active_count = 0;
176 	DEBUG_LOG_FRAME(imx21, etd, disactivated);
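/*
 * Teardown order matters here: the ETD is first removed from the enable
 * set (USBH_ETDENCLR), then its "done" interrupt and DMA channel are
 * disabled, and finally any stale done status is cleared.
 */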
179 static void reset_etd(struct imx21 *imx21, int num)
181 	struct etd_priv *etd = imx21->etd + num;
184 	disactivate_etd(imx21, num);
187 		etd_writel(imx21, num, i, 0);
188 	etd->urb = NULL;
189 	etd->ep = NULL;
190 	etd->td = NULL;
191 	etd->bounce_buffer = NULL;
194 static void free_etd(struct imx21 *imx21, int num)
200 		dev_err(imx21->dev, "BAD etd=%d!\n", num);
203 	if (imx21->etd[num].alloc == 0) {
204 		dev_err(imx21->dev, "ETD %d already free!\n", num);
208 	debug_etd_freed(imx21);
209 	reset_etd(imx21, num);
210 	memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
214 static void setup_etd_dword0(struct imx21 *imx21,
217 	etd_writel(imx21, etd_num, 0,
218 		((u32) usb_pipedevice(urb->pipe)) << DW0_ADDRESS |
219 		((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
221 		((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
223 		((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
232 	struct imx21 *imx21, int dmem_offset, void *src, int count)
234 	void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
254 static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
257 	struct etd_priv *etd = &imx21->etd[etd_num];
259 	if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
261 		if (etd->len <= etd->dmem_size) {
264 				copy_to_dmem(imx21,
265 						etd->dmem_offset,
266 						etd->cpu_buffer, etd->len);
268 			etd->dma_handle = 0;
276 				etd->bounce_buffer = kmalloc(etd->len,
280 				etd->bounce_buffer = kmemdup(etd->cpu_buffer,
281 						etd->len,
284 			if (!etd->bounce_buffer) {
285 				dev_err(imx21->dev, "failed bounce alloc\n");
289 			etd->dma_handle =
290 				dma_map_single(imx21->dev,
291 						etd->bounce_buffer,
292 						etd->len,
294 			if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
295 				dev_err(imx21->dev, "failed bounce map\n");
301 	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
302 	set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
303 	clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
304 	clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
306 	if (etd->dma_handle) {
307 		set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
308 		clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
309 		clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
310 		writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
311 		set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
315 			set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
316 			set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
320 	DEBUG_LOG_FRAME(imx21, etd, activated);
323 	if (!etd->active_count) {
325 		etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
326 		etd->disactivated_frame = -1;
327 		etd->last_int_frame = -1;
328 		etd->last_req_frame = -1;
331 			etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
335 	etd->active_count = 1;
336 	writel(etd_mask, imx21->regs + USBH_ETDENSET);
340 	kfree(etd->bounce_buffer);
343 	free_dmem(imx21, etd);
344 	nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
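/*
 * Buffers the DMA engine cannot reach are handled two ways: an OUT
 * transfer that fits the ETD's data-memory window is copied there and
 * done by PIO (dma_handle cleared); anything larger goes through a
 * kmalloc'd/kmemdup'd bounce buffer that is dma_map_single()'d instead.
 * If the bounce path fails, the URB is completed with -ENOMEM.
 */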
351 static int alloc_dmem(struct imx21 *imx21, unsigned int size,
361 		dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
363 		return -EINVAL;
366 	list_for_each_entry(tmp, &imx21->dmem_list, list) {
369 		if ((size + offset) <= tmp->offset)
371 		offset = tmp->size + tmp->offset;
378 		return -ENOMEM;
380 	area->ep = ep;
381 	area->offset = offset;
382 	area->size = size;
383 	list_add_tail(&area->list, &tmp->list);
384 	debug_dmem_allocated(imx21, size);
388 	return -ENOMEM;
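/*
 * DMEM (the controller's on-chip packet memory) is managed as an
 * offset-sorted list of in-use areas; allocation walks the list and
 * slots the new area into the first gap large enough (first-fit),
 * keeping the list sorted via list_add_tail() before the successor.
 */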
391 /* Memory now available for a queued ETD - activate it */
392 static void activate_queued_etd(struct imx21 *imx21,
395 	struct urb_priv *urb_priv = etd->urb->hcpriv;
396 	int etd_num = etd - &imx21->etd[0];
397 	u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
398 	u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;
400 	dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
402 	etd_writel(imx21, etd_num, 1,
405 	etd->dmem_offset = dmem_offset;
406 	urb_priv->active = 1;
407 	activate_etd(imx21, etd_num, dir);
410 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
417 	if (!etd->dmem_size)
419 	etd->dmem_size = 0;
421 	offset = etd->dmem_offset;
422 	list_for_each_entry(area, &imx21->dmem_list, list) {
423 		if (area->offset == offset) {
424 			debug_dmem_freed(imx21, area->size);
425 			list_del(&area->list);
433 		dev_err(imx21->dev,
439 	list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
440 		offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
442 			list_del(&etd->queue);
443 			activate_queued_etd(imx21, etd, (u32)offset);
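/*
 * Freeing a DMEM area doubles as the retry point for ETDs parked on
 * queue_for_dmem: each waiter re-attempts its allocation and is taken
 * off the queue and activated as soon as its allocation succeeds.
 */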
448 static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
452 	list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
453 		if (area->ep == ep) {
454 			dev_err(imx21->dev,
456 				area->offset, ep);
457 			list_del(&area->list);
468 /* Endpoint now idle - release its ETD(s) or assign to queued request */
469 static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
474 		int etd_num = ep_priv->etd[i];
479 		etd = &imx21->etd[etd_num];
480 		ep_priv->etd[i] = -1;
482 		free_dmem(imx21, etd); /* for isoc */
484 		if (list_empty(&imx21->queue_for_etd)) {
485 			free_etd(imx21, etd_num);
489 		dev_dbg(imx21->dev,
491 		ep_priv = list_first_entry(&imx21->queue_for_etd,
493 		list_del(&ep_priv->queue);
494 		reset_etd(imx21, etd_num);
495 		ep_priv->waiting_etd = 0;
496 		ep_priv->etd[i] = etd_num;
498 		if (list_empty(&ep_priv->ep->urb_list)) {
499 			dev_err(imx21->dev, "No urb for queued ep!\n");
502 		schedule_nonisoc_etd(imx21, list_first_entry(
503 			&ep_priv->ep->urb_list, struct urb, urb_list));
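/*
 * When an endpoint goes idle its ETDs are not simply freed: if another
 * endpoint is waiting on queue_for_etd, the ETD is reset and handed
 * over directly, and the first URB on that endpoint is scheduled.
 */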
508 __releases(imx21->lock)
509 __acquires(imx21->lock)
511 	struct imx21 *imx21 = hcd_to_imx21(hcd);
512 	struct ep_priv *ep_priv = urb->ep->hcpriv;
513 	struct urb_priv *urb_priv = urb->hcpriv;
515 	debug_urb_completed(imx21, urb, status);
516 	dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);
518 	kfree(urb_priv->isoc_td);
519 	kfree(urb->hcpriv);
520 	urb->hcpriv = NULL;
522 	spin_unlock(&imx21->lock);
524 	spin_lock(&imx21->lock);
526 	if (list_empty(&ep_priv->ep->urb_list))
527 		ep_idle(imx21, ep_priv);
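/*
 * The __releases/__acquires annotations document that the lock is
 * dropped across the elided giveback to the USB core (between lines
 * 522 and 524), since the URB's completion callback may run there and
 * re-enter the HCD.
 */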
530 	struct imx21 *imx21, struct etd_priv *etd, int status)
532 	struct usb_host_endpoint *ep = etd->ep;
534 	urb_done(imx21->hcd, etd->urb, status);
535 	etd->urb = NULL;
537 	if (!list_empty(&ep->urb_list)) {
539 			&ep->urb_list, struct urb, urb_list);
541 		dev_vdbg(imx21->dev, "next URB %p\n", urb);
542 		schedule_nonisoc_etd(imx21, urb);
554 	struct imx21 *imx21 = hcd_to_imx21(hcd);
555 	struct ep_priv *ep_priv = ep->hcpriv;
566 		if (list_empty(&ep_priv->td_list))
569 		etd_num = ep_priv->etd[i];
573 		etd = &imx21->etd[etd_num];
574 		if (etd->urb)
577 		td = list_entry(ep_priv->td_list.next, struct td, list);
578 		list_del(&td->list);
579 		urb_priv = td->urb->hcpriv;
582 		if (frame_after(cur_frame, td->frame)) {
583 			dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
584 				cur_frame, td->frame);
585 			urb_priv->isoc_status = -EXDEV;
586 			td->urb->iso_frame_desc[
587 				td->isoc_index].actual_length = 0;
588 			td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
589 			if (--urb_priv->isoc_remaining == 0)
590 				urb_done(hcd, td->urb, urb_priv->isoc_status);
594 		urb_priv->active = 1;
595 		etd->td = td;
596 		etd->ep = td->ep;
597 		etd->urb = td->urb;
598 		etd->len = td->len;
599 		etd->dma_handle = td->dma_handle;
600 		etd->cpu_buffer = td->cpu_buffer;
602 		debug_isoc_submitted(imx21, cur_frame, td);
604 		dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
605 		setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
606 		etd_writel(imx21, etd_num, 1, etd->dmem_offset);
607 		etd_writel(imx21, etd_num, 2,
609 			((td->frame & 0xFFFF) << DW2_STARTFRM));
610 		etd_writel(imx21, etd_num, 3,
612 			(td->len << DW3_PKTLEN0));
614 		activate_etd(imx21, etd_num, dir);
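/*
 * Isochronous endpoints own a small set of ETDs that are recycled as
 * transfers complete: each free ETD is loaded with the next queued td,
 * with the start frame written into dword 2 so the controller launches
 * the packet in exactly that frame.  A td whose frame has already
 * passed is completed immediately with -EXDEV rather than sent late.
 */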
620 	struct imx21 *imx21 = hcd_to_imx21(hcd);
622 	struct etd_priv *etd = imx21->etd + etd_num;
623 	struct urb *urb = etd->urb;
624 	struct urb_priv *urb_priv = urb->hcpriv;
625 	struct td *td = etd->td;
626 	struct usb_host_endpoint *ep = etd->ep;
627 	int isoc_index = td->isoc_index;
628 	unsigned int pipe = urb->pipe;
633 	disactivate_etd(imx21, etd_num);
635 	cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
636 	bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;
647 	debug_isoc_completed(imx21,
650 		urb_priv->isoc_status = -EXDEV;
651 		dev_dbg(imx21->dev,
654 			cc, imx21_hc_get_frame(hcd), td->frame,
655 			bytes_xfrd, td->len, urb, etd_num, isoc_index);
659 		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
660 		if (!etd->dma_handle)
661 			memcpy_fromio(etd->cpu_buffer,
662 				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
666 	urb->actual_length += bytes_xfrd;
667 	urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
668 	urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];
670 	etd->td = NULL;
671 	etd->urb = NULL;
672 	etd->ep = NULL;
674 	if (--urb_priv->isoc_remaining == 0)
675 		urb_done(hcd, urb, urb_priv->isoc_status);
681 	struct imx21 *imx21, struct usb_host_endpoint *ep)
691 		ep_priv->etd[i] = -1;
693 	INIT_LIST_HEAD(&ep_priv->td_list);
694 	ep_priv->ep = ep;
695 	ep->hcpriv = ep_priv;
699 static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
706 		if (ep_priv->etd[i] < 0) {
707 			etd_num = alloc_etd(imx21);
711 			ep_priv->etd[i] = etd_num;
712 			imx21->etd[etd_num].ep = ep_priv->ep;
718 	dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
720 		free_etd(imx21, ep_priv->etd[j]);
721 		ep_priv->etd[j] = -1;
723 	return -ENOMEM;
730 	struct imx21 *imx21 = hcd_to_imx21(hcd);
742 		return -ENOMEM;
744 	urb_priv->isoc_td = kcalloc(urb->number_of_packets, sizeof(struct td),
746 	if (urb_priv->isoc_td == NULL) {
747 		ret = -ENOMEM;
751 	spin_lock_irqsave(&imx21->lock, flags);
753 	if (ep->hcpriv == NULL) {
754 		ep_priv = alloc_isoc_ep(imx21, ep);
756 			ret = -ENOMEM;
760 		ep_priv = ep->hcpriv;
763 	ret = alloc_isoc_etds(imx21, ep_priv);
771 	urb->status = -EINPROGRESS;
772 	urb->actual_length = 0;
773 	urb->error_count = 0;
774 	urb->hcpriv = urb_priv;
775 	urb_priv->ep = ep;
778 	maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
780 		struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];
782 		if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
784 			dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
785 				etd->dmem_size, maxpacket);
786 			ret = -EMSGSIZE;
790 		if (etd->dmem_size == 0) {
791 			etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
792 			if (etd->dmem_offset < 0) {
793 				dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
794 				ret = -EAGAIN;
797 			etd->dmem_size = maxpacket;
804 	if (list_empty(&ep_priv->td_list)) {
805 		urb->start_frame = wrap_frame(cur_frame + 5);
807 		urb->start_frame = wrap_frame(list_entry(ep_priv->td_list.prev,
808 				struct td, list)->frame + urb->interval);
810 		if (frame_after(cur_frame, urb->start_frame)) {
811 			dev_dbg(imx21->dev,
813 				urb->start_frame, cur_frame,
814 				(urb->transfer_flags & URB_ISO_ASAP) != 0);
816 					cur_frame - urb->start_frame),
817 					urb->interval);
820 			if ((urb->transfer_flags & URB_ISO_ASAP) ||
821 					i >= urb->number_of_packets) {
822 				urb->start_frame = wrap_frame(urb->start_frame
823 						+ i * urb->interval);
830 	urb_priv->isoc_remaining = urb->number_of_packets - i;
831 	td = urb_priv->isoc_td;
832 	for (; i < urb->number_of_packets; i++, td++) {
833 		unsigned int offset = urb->iso_frame_desc[i].offset;
834 		td->ep = ep;
835 		td->urb = urb;
836 		td->len = urb->iso_frame_desc[i].length;
837 		td->isoc_index = i;
838 		td->frame = wrap_frame(urb->start_frame + urb->interval * i);
839 		td->dma_handle = urb->transfer_dma + offset;
840 		td->cpu_buffer = urb->transfer_buffer + offset;
841 		list_add_tail(&td->list, &ep_priv->td_list);
844 	dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
845 		urb->number_of_packets, urb->start_frame, td->frame);
847 	debug_urb_submitted(imx21, urb);
850 	spin_unlock_irqrestore(&imx21->lock, flags);
859 	spin_unlock_irqrestore(&imx21->lock, flags);
860 	kfree(urb_priv->isoc_td);
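/*
 * Frame scheduling: a new stream starts 5 frames in the future, while
 * a stream with queued tds continues one interval after the last td.
 * If that slot has already passed, the number of missed packets i is
 * computed; with URB_ISO_ASAP (or a complete underrun) the whole URB
 * is slid forward by i intervals, otherwise the first i packets are
 * simply dropped and only the remainder get tds.
 */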
867 static void dequeue_isoc_urb(struct imx21 *imx21,
870 	struct urb_priv *urb_priv = urb->hcpriv;
874 	if (urb_priv->active) {
876 			int etd_num = ep_priv->etd[i];
877 			if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
878 				struct etd_priv *etd = imx21->etd + etd_num;
880 				reset_etd(imx21, etd_num);
881 				free_dmem(imx21, etd);
886 	list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
887 		if (td->urb == urb) {
888 			dev_vdbg(imx21->dev, "removing td %p\n", td);
889 			list_del(&td->list);
898 static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
900 	unsigned int pipe = urb->pipe;
901 	struct urb_priv *urb_priv = urb->hcpriv;
902 	struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
903 	int state = urb_priv->state;
904 	int etd_num = ep_priv->etd[0];
916 		dev_err(imx21->dev, "No valid ETD\n");
919 	if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
920 		dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);
922 	etd = &imx21->etd[etd_num];
923 	maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
930 			if (unsuitable_for_dma(urb->setup_dma))
931 				usb_hcd_unmap_urb_setup_for_dma(imx21->hcd,
933 			etd->dma_handle = urb->setup_dma;
934 			etd->cpu_buffer = urb->setup_packet;
947 		if (unsuitable_for_dma(urb->transfer_dma))
948 			usb_hcd_unmap_urb_for_dma(imx21->hcd, urb);
950 		etd->dma_handle = urb->transfer_dma;
951 		etd->cpu_buffer = urb->transfer_buffer;
955 			count = urb->transfer_buffer_length;
961 					urb->dev,
962 					usb_pipeendpoint(urb->pipe),
963 					usb_pipeout(urb->pipe)))
970 	etd->urb = urb;
971 	etd->ep = urb_priv->ep;
972 	etd->len = count;
975 		interval = urb->interval;
976 		relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
980 	setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
982 	etd_writel(imx21, etd_num, 2,
998 	etd_writel(imx21, etd_num, 3,
999 		((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);
1002 		etd->dma_handle = 0;
1005 	etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
1006 	etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
1007 	if (etd->dmem_offset < 0) {
1009 		etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);
1011 		dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
1012 		debug_urb_queued_for_dmem(imx21, urb);
1013 		list_add_tail(&etd->queue, &imx21->queue_for_dmem);
1017 	etd_writel(imx21, etd_num, 1,
1018 		(((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
1019 		(u32) etd->dmem_offset);
1021 	urb_priv->active = 1;
1024 	dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
1026 	activate_etd(imx21, etd_num, dir);
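/*
 * Non-isochronous endpoints use a single ETD (ep_priv->etd[0]).  The
 * X and Y data-memory buffers are allocated together (double-buffered
 * when the transfer exceeds one packet); if DMEM is exhausted, the ETD
 * is fully programmed except for dword 1 and parked on queue_for_dmem
 * until free_dmem() can satisfy it via activate_queued_etd().
 */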
1032 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1033 	struct etd_priv *etd = &imx21->etd[etd_num];
1034 	struct urb *urb = etd->urb;
1036 	struct urb_priv *urb_priv = urb->hcpriv;
1042 	disactivate_etd(imx21, etd_num);
1044 	dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
1045 	cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
1046 	bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);
1049 	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1050 		usb_pipeout(urb->pipe),
1051 		(etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);
1054 		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
1055 		clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
1057 		if (etd->bounce_buffer) {
1058 			memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
1059 			dma_unmap_single(imx21->dev,
1060 				etd->dma_handle, etd->len, DMA_FROM_DEVICE);
1061 		} else if (!etd->dma_handle && bytes_xfrd) { /* PIO */
1062 			memcpy_fromio(etd->cpu_buffer,
1063 				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
1068 	kfree(etd->bounce_buffer);
1069 	etd->bounce_buffer = NULL;
1070 	free_dmem(imx21, etd);
1072 	urb->error_count = 0;
1073 	if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
1078 		dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);
1082 	switch (usb_pipetype(urb->pipe)) {
1084 		switch (urb_priv->state) {
1086 			if (urb->transfer_buffer_length > 0)
1087 				urb_priv->state = US_CTRL_DATA;
1089 				urb_priv->state = US_CTRL_ACK;
1092 			urb->actual_length += bytes_xfrd;
1093 			urb_priv->state = US_CTRL_ACK;
1099 			dev_err(imx21->dev,
1100 				"Invalid pipe state %d\n", urb_priv->state);
1107 		urb->actual_length += bytes_xfrd;
1108 		if ((urb_priv->state == US_BULK)
1109 		    && (urb->transfer_flags & URB_ZERO_PACKET)
1110 		    && urb->transfer_buffer_length > 0
1111 		    && ((urb->transfer_buffer_length %
1112 			usb_maxpacket(urb->dev, urb->pipe,
1113 				usb_pipeout(urb->pipe))) == 0)) {
1114 			/* need a 0-packet */
1115 			urb_priv->state = US_BULK0;
1122 		urb->actual_length += bytes_xfrd;
1128 		nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
1130 		dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
1131 		schedule_nonisoc_etd(imx21, urb);
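/*
 * Control transfers are driven as a small state machine across
 * repeated ETD completions: US_CTRL_SETUP -> US_CTRL_DATA (skipped for
 * zero-length data stages) -> US_CTRL_ACK.  Bulk OUT transfers that
 * need a terminating zero-length packet (URB_ZERO_PACKET set and the
 * length a multiple of maxpacket) take one extra pass through US_BULK0
 * before the URB is completed.
 */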
1146 		ep_priv->etd[i] = -1;
1154 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1155 	struct usb_host_endpoint *ep = urb->ep;
1162 	dev_vdbg(imx21->dev,
1166 		urb->transfer_buffer_length,
1167 		urb->transfer_buffer, &urb->transfer_dma,
1168 		urb->setup_packet, &urb->setup_dma);
1170 	if (usb_pipeisoc(urb->pipe))
1175 		return -ENOMEM;
1177 	spin_lock_irqsave(&imx21->lock, flags);
1179 	ep_priv = ep->hcpriv;
1183 			ret = -ENOMEM;
1186 		ep->hcpriv = ep_priv;
1187 		ep_priv->ep = ep;
1194 	urb->status = -EINPROGRESS;
1195 	urb->actual_length = 0;
1196 	urb->error_count = 0;
1197 	urb->hcpriv = urb_priv;
1198 	urb_priv->ep = ep;
1200 	switch (usb_pipetype(urb->pipe)) {
1202 		urb_priv->state = US_CTRL_SETUP;
1205 		urb_priv->state = US_BULK;
1209 	debug_urb_submitted(imx21, urb);
1210 	if (ep_priv->etd[0] < 0) {
1211 		if (ep_priv->waiting_etd) {
1212 			dev_dbg(imx21->dev,
1215 			debug_urb_queued_for_etd(imx21, urb);
1218 		ep_priv->etd[0] = alloc_etd(imx21);
1219 		if (ep_priv->etd[0] < 0) {
1220 			dev_dbg(imx21->dev,
1222 			debug_urb_queued_for_etd(imx21, urb);
1223 			list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
1224 			ep_priv->waiting_etd = 1;
1230 	etd = &imx21->etd[ep_priv->etd[0]];
1231 	if (etd->urb == NULL) {
1232 		DEBUG_LOG_FRAME(imx21, etd, last_req);
1233 		schedule_nonisoc_etd(imx21, urb);
1237 	spin_unlock_irqrestore(&imx21->lock, flags);
1242 	spin_unlock_irqrestore(&imx21->lock, flags);
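/*
 * Enqueue never fails merely for lack of an ETD: an endpoint that
 * cannot get one is put on queue_for_etd with waiting_etd set, and
 * ep_idle() later hands it an ETD as another endpoint goes idle.
 */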
1250 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1254 	struct urb_priv *urb_priv = urb->hcpriv;
1255 	int ret = -EINVAL;
1257 	dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
1258 		urb, usb_pipeisoc(urb->pipe), status);
1260 	spin_lock_irqsave(&imx21->lock, flags);
1265 	ep = urb_priv->ep;
1266 	ep_priv = ep->hcpriv;
1268 	debug_urb_unlinked(imx21, urb);
1270 	if (usb_pipeisoc(urb->pipe)) {
1271 		dequeue_isoc_urb(imx21, urb, ep_priv);
1273 	} else if (urb_priv->active) {
1274 		int etd_num = ep_priv->etd[0];
1275 		if (etd_num != -1) {
1276 			struct etd_priv *etd = &imx21->etd[etd_num];
1278 			disactivate_etd(imx21, etd_num);
1279 			free_dmem(imx21, etd);
1280 			etd->urb = NULL;
1281 			kfree(etd->bounce_buffer);
1282 			etd->bounce_buffer = NULL;
1288 	spin_unlock_irqrestore(&imx21->lock, flags);
1292 	spin_unlock_irqrestore(&imx21->lock, flags);
1300 static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
1306 	spin_lock_irqsave(&imx21->lock, flags);
1310 		u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
1311 		u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
1312 		struct etd_priv *etd = &imx21->etd[etd_num];
1316 			DEBUG_LOG_FRAME(imx21, etd, last_int);
1337 			if (etd->active_count && !enabled) /* suspicious... */
1340 			if (!sof || enabled || !etd->active_count)
1343 			cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
1347 			if (++etd->active_count < 10)
1350 			dword0 = etd_readl(imx21, etd_num, 0);
1351 			dev_dbg(imx21->dev,
1358 			dev_dbg(imx21->dev,
1361 				etd->activated_frame,
1362 				etd->disactivated_frame,
1363 				etd->last_int_frame,
1364 				etd->last_req_frame,
1365 				readl(imx21->regs + USBH_FRMNUB));
1366 			imx21->debug_unblocks++;
1368 			etd->active_count = 0;
1372 		if (etd->ep == NULL || etd->urb == NULL) {
1373 			dev_dbg(imx21->dev,
1376 				etd_num, etd->ep, etd->urb);
1377 			disactivate_etd(imx21, etd_num);
1381 		if (usb_pipeisoc(etd->urb->pipe))
1389 		set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
1391 		clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
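/*
 * The start-of-frame path here is a watchdog: an ETD can complete (CC
 * written) and self-disable without ETDDONESTAT ever being set, so no
 * done interrupt arrives.  When a still-active ETD is seen disabled,
 * the SOF interrupt is enabled and the ETD is force-completed once it
 * has been stuck for 10 frames (active_count reaching 10).
 */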
1399 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1400 	u32 ints = readl(imx21->regs + USBH_SYSISR);
1403 		dev_dbg(imx21->dev, "Scheduling error\n");
1406 		dev_dbg(imx21->dev, "Scheduling overrun\n");
1409 		process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);
1411 	writel(ints, imx21->regs + USBH_SYSISR);
1418 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1426 	spin_lock_irqsave(&imx21->lock, flags);
1427 	ep_priv = ep->hcpriv;
1428 	dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);
1430 	if (!list_empty(&ep->urb_list))
1431 		dev_dbg(imx21->dev, "ep's URB list is not empty\n");
1435 			if (ep_priv->etd[i] > -1)
1436 				dev_dbg(imx21->dev, "free etd %d for disable\n",
1437 					ep_priv->etd[i]);
1439 			free_etd(imx21, ep_priv->etd[i]);
1442 		ep->hcpriv = NULL;
1446 		if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
1447 			dev_err(imx21->dev,
1449 			free_etd(imx21, i);
1452 	free_epdmem(imx21, ep);
1453 	spin_unlock_irqrestore(&imx21->lock, flags);
1463 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1464 	desc->bDescriptorType = USB_DT_HUB; /* HUB descriptor */
1465 	desc->bHubContrCurrent = 0;
1467 	desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
1469 	desc->bDescLength = 9;
1470 	desc->bPwrOn2PwrGood = 0;
1471 	desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
1475 	desc->u.hs.DeviceRemovable[0] = 1 << 1;
1476 	desc->u.hs.DeviceRemovable[1] = ~0;
1482 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1488 	spin_lock_irqsave(&imx21->lock, flags);
1489 	ports = readl(imx21->regs + USBH_ROOTHUBA)
1493 		dev_err(imx21->dev, "ports %d > 7\n", ports);
1496 		if (readl(imx21->regs + USBH_PORTSTAT(i)) &
1507 	spin_unlock_irqrestore(&imx21->lock, flags);
1510 		dev_info(imx21->dev, "Hub status changed\n");
1518 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1524 		dev_dbg(imx21->dev, "ClearHubFeature\n");
1527 			dev_dbg(imx21->dev, " OVER_CURRENT\n");
1530 			dev_dbg(imx21->dev, " LOCAL_POWER\n");
1533 			dev_dbg(imx21->dev, " unknown\n");
1534 			rc = -EINVAL;
1540 		dev_dbg(imx21->dev, "ClearPortFeature\n");
1543 			dev_dbg(imx21->dev, " ENABLE\n");
1547 			dev_dbg(imx21->dev, " SUSPEND\n");
1551 			dev_dbg(imx21->dev, " POWER\n");
1555 			dev_dbg(imx21->dev, " C_ENABLE\n");
1559 			dev_dbg(imx21->dev, " C_SUSPEND\n");
1563 			dev_dbg(imx21->dev, " C_CONNECTION\n");
1567 			dev_dbg(imx21->dev, " C_OVER_CURRENT\n");
1571 			dev_dbg(imx21->dev, " C_RESET\n");
1575 			dev_dbg(imx21->dev, " unknown\n");
1576 			rc = -EINVAL;
1583 		dev_dbg(imx21->dev, "GetHubDescriptor\n");
1588 		dev_dbg(imx21->dev, " GetHubStatus\n");
1593 		dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
1594 			wIndex, USBH_PORTSTAT(wIndex - 1));
1595 		*(__le32 *) buf = readl(imx21->regs +
1596 			USBH_PORTSTAT(wIndex - 1));
1600 		dev_dbg(imx21->dev, "SetHubFeature\n");
1603 			dev_dbg(imx21->dev, " OVER_CURRENT\n");
1607 			dev_dbg(imx21->dev, " LOCAL_POWER\n");
1610 			dev_dbg(imx21->dev, " unknown\n");
1611 			rc = -EINVAL;
1618 		dev_dbg(imx21->dev, "SetPortFeature\n");
1621 			dev_dbg(imx21->dev, " SUSPEND\n");
1625 			dev_dbg(imx21->dev, " POWER\n");
1629 			dev_dbg(imx21->dev, " RESET\n");
1633 			dev_dbg(imx21->dev, " unknown\n");
1634 			rc = -EINVAL;
1640 		dev_dbg(imx21->dev, " unknown\n");
1641 		rc = -EINVAL;
1646 		writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
1656 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1660 	spin_lock_irqsave(&imx21->lock, flags);
1665 		imx21->regs + USBOTG_RST_CTRL);
1669 	while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
1671 			spin_unlock_irqrestore(&imx21->lock, flags);
1672 			dev_err(imx21->dev, "timeout waiting for reset\n");
1673 			return -ETIMEDOUT;
1675 		spin_unlock_irq(&imx21->lock);
1677 		spin_lock_irq(&imx21->lock);
1679 	spin_unlock_irqrestore(&imx21->lock, flags);
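/*
 * The reset poll drops and retakes the lock around an elided sleep
 * (between lines 1675 and 1677) so other contexts can run while the
 * hardware clears USBOTG_RST_CTRL back to zero; the timeout path bails
 * out with -ETIMEDOUT.
 */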
1685 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1691 	hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
1693 	hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
1696 	if (imx21->pdata->host1_txenoe)
1699 	if (!imx21->pdata->host1_xcverless)
1702 	if (imx21->pdata->otg_ext_xcvr)
1706 	spin_lock_irqsave(&imx21->lock, flags);
1709 		imx21->regs + USBOTG_CLK_CTRL);
1710 	writel(hw_mode, imx21->regs + USBOTG_HWMODE);
1711 	writel(usb_control, imx21->regs + USBCTRL);
1713 		imx21->regs + USB_MISCCONTROL);
1718 			etd_writel(imx21, i, j, 0);
1722 		imx21->regs + USBH_HOST_CTRL);
1725 	if (imx21->pdata->enable_otg_host)
1727 			imx21->regs + USBH_PORTSTAT(0));
1729 	if (imx21->pdata->enable_host1)
1731 			imx21->regs + USBH_PORTSTAT(1));
1733 	if (imx21->pdata->enable_host2)
1735 			imx21->regs + USBH_PORTSTAT(2));
1738 	hcd->state = HC_STATE_RUNNING;
1741 	set_register_bits(imx21, USBH_SYSIEN,
1744 	set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
1746 	spin_unlock_irqrestore(&imx21->lock, flags);
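/*
 * Start-up order visible above: enable the host and main clocks, set
 * the hardware mode and transceiver routing from platform data, zero
 * all ETD dwords, bring the HC to its operational state, power and
 * enable each port selected by the platform, and only then unmask the
 * host-controller interrupts.
 */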
1753 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1756 	spin_lock_irqsave(&imx21->lock, flags);
1758 	writel(0, imx21->regs + USBH_SYSIEN);
1759 	clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
1760 	clear_register_bits(imx21, USBOTG_CLK_CTRL, /* offset first, then mask */
1762 	spin_unlock_irqrestore(&imx21->lock, flags);
1771 	.product_desc = "IMX21 USB Host Controller",
1772 	.hcd_priv_size = sizeof(struct imx21),
1807 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1810 	remove_debug_files(imx21);
1814 		clk_disable_unprepare(imx21->clk);
1815 		clk_put(imx21->clk);
1816 		iounmap(imx21->regs);
1817 		release_mem_region(res->start, resource_size(res));
1828 	struct imx21 *imx21;
1837 		return -ENODEV;
1843 		&pdev->dev, dev_name(&pdev->dev));
1845 		dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
1846 			dev_name(&pdev->dev));
1847 		return -ENOMEM;
1850 	imx21 = hcd_to_imx21(hcd);
1851 	imx21->hcd = hcd;
1852 	imx21->dev = &pdev->dev;
1853 	imx21->pdata = dev_get_platdata(&pdev->dev);
1854 	if (!imx21->pdata)
1855 		imx21->pdata = &default_pdata;
1857 	spin_lock_init(&imx21->lock);
1858 	INIT_LIST_HEAD(&imx21->dmem_list);
1859 	INIT_LIST_HEAD(&imx21->queue_for_etd);
1860 	INIT_LIST_HEAD(&imx21->queue_for_dmem);
1861 	create_debug_files(imx21);
1863 	res = request_mem_region(res->start, resource_size(res), hcd_name);
1865 		ret = -EBUSY;
1869 	imx21->regs = ioremap(res->start, resource_size(res));
1870 	if (imx21->regs == NULL) {
1871 		dev_err(imx21->dev, "Cannot map registers\n");
1872 		ret = -ENOMEM;
1877 	imx21->clk = clk_get(imx21->dev, NULL);
1878 	if (IS_ERR(imx21->clk)) {
1879 		dev_err(imx21->dev, "no clock found\n");
1880 		ret = PTR_ERR(imx21->clk);
1884 	ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
1887 	ret = clk_prepare_enable(imx21->clk);
1891 	dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
1892 		(readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);
1896 		dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
1899 	device_wakeup_enable(hcd->self.controller);
1904 	clk_disable_unprepare(imx21->clk);
1907 	clk_put(imx21->clk);
1909 	iounmap(imx21->regs);
1911 	release_mem_region(res->start, resource_size(res));
1913 	remove_debug_files(imx21);
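/*
 * Probe requires a 48 MHz clock: the rate is rounded via
 * clk_round_rate() and applied before clk_prepare_enable(), and the
 * error path unwinds the clock, register mapping, memory region and
 * debug files in reverse order of acquisition.
 */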
1933 MODULE_ALIAS("platform:imx21-hcd");