Lines matching "ehci" in drivers/usb/host/ehci-q.c:

File header:
     6  /* this file is part of ehci-hcd.c */
    11   * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
    20   * an ongoing challenge.  That's in "ehci-sched.c".

In qtd_fill():
    33  qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
    41          qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
    42          qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
    53                  qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
    54                  qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
    67          qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
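
The gaps between these matches hide the per-page walk: a qTD carries five 4 KiB buffer-pointer slots, the first possibly unaligned, so qtd_fill() advances buf page by page and finally trims count to whole max-packet units before writing the token at line 67. A reconstructed sketch of that elided logic (assuming the driver's usual field names; not the verbatim kernel code):

        static unsigned int sketch_qtd_fill(struct ehci_hcd *ehci,
                        struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
                        int token, int maxpacket)
        {
                unsigned int count;
                u64 addr = buf;
                int i;

                /* first slot may start mid-page; it covers the rest of that page */
                qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
                qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
                count = 0x1000 - (buf & 0x0fff);
                if (len < count) {
                        count = len;
                } else {
                        buf += 0x1000;
                        buf &= ~0x0fff;

                        /* the remaining slots are page-aligned, 4 KiB each */
                        for (i = 1; count < len && i < 5; i++) {
                                addr = buf;
                                qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
                                qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
                                                (u32)(addr >> 32));
                                buf += 0x1000;
                                if ((count + 0x1000) < len)
                                        count += 0x1000;
                                else
                                        count = len;
                        }

                        /* short packets may only terminate a transfer */
                        if (count != len)
                                count -= (count % maxpacket);
                }

                qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
                qtd->length = count;
                return count;
        }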

In qh_update():
    76  qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
    83          hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
    84          hw->hw_alt_next = EHCI_LIST_END(ehci);
    91          if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) {
    95                  epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
    97                          hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
   102          hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
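
Line 91's test is the data-toggle fix-up: when QH_TOGGLE_CTL is clear, the toggle lives in the QH overlay rather than in each qTD, so it must be synchronized with usbcore's per-endpoint toggle state. A sketch of that branch, reconstructed around the matches above (usb_gettoggle()/usb_settoggle() are the usbcore helpers; qh->ps.udev is the field layout of recent kernels):

        if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) {
                unsigned is_out = qh->is_out;
                unsigned epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;

                /* the overlay's toggle bit must match usbcore's saved toggle */
                if (unlikely(!usb_gettoggle(qh->ps.udev, epnum, is_out))) {
                        hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
                        usb_settoggle(qh->ps.udev, epnum, is_out, 1);
                }
        }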

In qh_refresh():
   110  qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
   123          if (qh->hw->hw_token & ACTIVE_BIT(ehci)) {
   126                  ehci_warn(ehci, "qh %p should be inactive!\n", qh);
   128          qh_update(ehci, qh, qtd);

Forward declaration:
   135  static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);

In ehci_clear_tt_buffer_complete():
   140          struct ehci_hcd *ehci = hcd_to_ehci(hcd);
   144          spin_lock_irqsave(&ehci->lock, flags);
   147                          && ehci->rh_state == EHCI_RH_RUNNING)
   148                  qh_link_async(ehci, qh);
   149          spin_unlock_irqrestore(&ehci->lock, flags);

In ehci_clear_tt_buffer():
   152  static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
   170          if (!ehci_is_TDI(ehci)
   172                          ehci_to_hcd(ehci)->self.root_hub) {

In qtd_copy_status():
   185          struct ehci_hcd *ehci,
   212           * EHCI Specification, Table 4-13.
   234                  ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",

In ehci_urb_done():
   248  ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
   252                          ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
   256                  INCR(ehci->stats.unlink);
   261                  INCR(ehci->stats.complete);
   265          ehci_dbg (ehci,
   274          usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
   275          usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);

Forward declaration:
   278  static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);

In qh_completions():
   286  qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
   330                          ehci_urb_done(ehci, last->urb, last_status);
   333                  ehci_qtd_free (ehci, last);
   343          token = hc32_to_cpu(ehci, qtd->hw_token);
   351                          ehci_dbg(ehci,
   372                                  ehci_dbg(ehci,
   384                          qtd->hw_token = cpu_to_hc32(ehci,
   387                                  hw->hw_token = cpu_to_hc32(ehci,
   405                                          & EHCI_LIST_END(ehci))) {
   412                                  && ehci->rh_state >= EHCI_RH_RUNNING)) {
   420                          if (ehci->rh_state < EHCI_RH_RUNNING) {
   441                              (hw->hw_token & ACTIVE_BIT(ehci))) {
   442                                  token = hc32_to_cpu(ehci, hw->hw_token);
   443                                  hw->hw_token &= ~ACTIVE_BIT(ehci);
   450                                  ehci_clear_tt_buffer(ehci, qh, urb, token);
   462                  last_status = qtd_copy_status(ehci, urb,
   466                                          & EHCI_LIST_END(ehci)))
   484                                  ehci_clear_tt_buffer(ehci, qh, urb,
   508                  ehci_urb_done(ehci, last->urb, last_status);
   509                  ehci_qtd_free (ehci, last);
   539          if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci))
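
These matches trace the main completion loop: walk qh->qtd_list in order, stop at the first qTD the controller still owns, fold each retired token into a URB status, and give the URB back once its last qTD retires. A much-reduced sketch of that control flow (a reconstruction; the real function also handles the overlay region, short-read fixups, halted-QH recovery, and unlink races):

        static unsigned sketch_qh_completions(struct ehci_hcd *ehci,
                        struct ehci_qh *qh)
        {
                struct ehci_qtd *qtd, *tmp;
                struct urb *last_urb = NULL;
                int last_status = -EINPROGRESS;
                u32 token;

                list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
                        rmb();          /* order our reads after the HC's writes */
                        token = hc32_to_cpu(ehci, qtd->hw_token);

                        if (token & QTD_STS_ACTIVE)
                                break;  /* the HC still owns this qTD */

                        /* a qTD from a new URB closes out the previous URB */
                        if (last_urb && qtd->urb != last_urb) {
                                ehci_urb_done(ehci, last_urb, last_status);
                                last_status = -EINPROGRESS;
                        }
                        last_urb = qtd->urb;
                        last_status = qtd_copy_status(ehci, qtd->urb,
                                        qtd->length, token);

                        list_del(&qtd->qtd_list);
                        ehci_qtd_free(ehci, qtd);
                }
                if (last_urb)
                        ehci_urb_done(ehci, last_urb, last_status);

                /* the real function reports whether the QH needs unlinking */
                return 0;
        }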

In qtd_list_free():
   553          struct ehci_hcd *ehci,
   564                  ehci_qtd_free (ehci, qtd);

In qh_urb_transaction():
   573          struct ehci_hcd *ehci,
   589          qtd = ehci_qtd_alloc (ehci, flags);
   603                  qtd_fill(ehci, qtd, urb->setup_dma,
   610                  qtd = ehci_qtd_alloc (ehci, flags);
   614                  qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
   654                  this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
   666                          qtd->hw_alt_next = ehci->async->hw->hw_alt_next;
   681                          qtd = ehci_qtd_alloc (ehci, flags);
   685                          qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
   696                  qtd->hw_alt_next = EHCI_LIST_END(ehci);
   717                          qtd = ehci_qtd_alloc (ehci, flags);
   721                          qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
   725                          qtd_fill(ehci, qtd, 0, 0, token, 0);
   731                  qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
   735          qtd_list_free (ehci, urb, head);
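
For a control transfer this function builds a three-stage chain: one qTD for SETUP (line 603), zero or more for the data stage, and a zero-length opposite-direction status qTD (line 725), with QTD_IOC set only on the last one (line 731). A sketch of that shape for the no-data case, with allocation-failure unwinding (line 735) omitted; ehci, urb, and flags are assumed to be in scope as in the real function:

        struct list_head head;
        struct ehci_qtd *qtd, *qtd_prev;
        int token = QTD_STS_ACTIVE | (EHCI_TUNE_CERR << 10);

        INIT_LIST_HEAD(&head);

        /* SETUP stage: 8-byte packet, toggle DATA0 */
        qtd = ehci_qtd_alloc(ehci, flags);
        qtd_fill(ehci, qtd, urb->setup_dma,
                        sizeof(struct usb_ctrlrequest),
                        token | (2 /* "setup" */ << 8), 8);
        list_add_tail(&qtd->qtd_list, &head);

        /* a data stage, if present, would be chained here */

        /* STATUS stage: zero length, reversed direction, forced DATA1 */
        qtd_prev = qtd;
        qtd = ehci_qtd_alloc(ehci, flags);
        qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
        token ^= 0x0100;                /* "in" <--> "out" */
        token |= QTD_TOGGLE;            /* force DATA1 */
        qtd_fill(ehci, qtd, 0, 0, token, 0);
        list_add_tail(&qtd->qtd_list, &head);

        /* interrupt only on the last qTD of the chain */
        qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);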

In qh_make():
   758          struct ehci_hcd *ehci,
   762          struct ehci_qh *qh = ehci_qh_alloc (ehci, flags);
   790                  ehci_dbg(ehci, "bogus qh maxpacket %d\n", maxp);
   819                  } else if (urb->interval > ehci->periodic_size << 3) {
   820                          urb->interval = ehci->periodic_size << 3;
   851                          if (urb->interval > ehci->periodic_size)
   852                                  urb->interval = ehci->periodic_size;
   891                  if (ehci_has_fsl_portno_bug(ehci))
   899                  if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
   929                  ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev,
   932                  qh_destroy(ehci, qh);
   941          hw->hw_info1 = cpu_to_hc32(ehci, info1);
   942          hw->hw_info2 = cpu_to_hc32(ehci, info2);
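
Lines 941-942 commit the endpoint description the function spent its body computing: info1/info2 pack the device address, endpoint number, max packet size, speed, and (for periodic or split traffic) scheduling fields into the QH's first two hardware dwords. A sketch of the packing for the simplest case, a high-speed bulk endpoint (EHCI_TUNE_* are the driver's tuning constants; maxp holds the raw wMaxPacketSize and max_packet() is the driver's mask helper, both assumptions of this sketch):

        u32 info1 = 0, info2 = 0;

        info1 |= usb_pipeendpoint(urb->pipe) << 8;      /* endpoint number */
        info1 |= usb_pipedevice(urb->pipe);             /* device address */

        /* high speed, no transaction translator involved */
        info1 |= QH_HIGH_SPEED;
        info1 |= EHCI_TUNE_RL_HS << 28;                 /* NAK reload count */
        info1 |= max_packet(maxp) << 16;                /* max packet size */
        info2 |= EHCI_TUNE_MULT_HS << 30;               /* transactions/uframe */

        hw->hw_info1 = cpu_to_hc32(ehci, info1);
        hw->hw_info2 = cpu_to_hc32(ehci, info2);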

In enable_async():
   950  static void enable_async(struct ehci_hcd *ehci)
   952          if (ehci->async_count++)
   956          ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC);
   959          ehci_poll_ASS(ehci);
   960          turn_on_io_watchdog(ehci);

In disable_async():
   963  static void disable_async(struct ehci_hcd *ehci)
   965          if (--ehci->async_count)
   969          WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
   970                  !list_empty(&ehci->async_idle));
   973          ehci_poll_ASS(ehci);

In qh_link_async():
   978  static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
   980          __hc32 dma = QH_NEXT(ehci, qh->qh_dma);
   990          qh_refresh(ehci, qh);
   993          head = ehci->async;
  1006          enable_async(ehci);
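
Between lines 993 and 1006 the QH is spliced in right after the dummy head of the circular async schedule; the write ordering matters because the controller may be traversing the ring at the same time. A reconstructed sketch of that splice (dma is the QH_NEXT() link computed at line 980):

        struct ehci_qh *head = ehci->async;

        /* point the new QH at the head's current successor first ... */
        qh->qh_next = head->qh_next;            /* software shadow list */
        qh->hw->hw_next = head->hw->hw_next;    /* hardware link */
        wmb();                                  /* publish before linking in */

        /* ... then switch the head over, so the HC never sees a gap */
        head->qh_next.qh = qh;
        head->hw->hw_next = dma;

        qh->qh_state = QH_STATE_LINKED;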

In qh_append_tds():
  1018          struct ehci_hcd *ehci,
  1026                  __hc32 qh_addr_mask = cpu_to_hc32(ehci, 0x7f);
  1030                  /* can't sleep here, we have ehci->lock... */
  1031                  qh = qh_make (ehci, urb, GFP_ATOMIC);
  1065                  qtd->hw_token = HALT_BIT(ehci);
  1077                  ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
  1084                  qtd->hw_next = QTD_NEXT(ehci, dma);
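
Lines 1065-1084 are the "dummy qTD" trick that lets qTDs be appended while the controller is running: the QH always ends in an inactive dummy, the first new qTD's contents are copied into that dummy (which sits at the DMA address the QH already points to), and the first new qTD is recycled as the next dummy. Only the final token write makes the new work visible to the HC. A reconstructed sketch, assuming qtd heads the caller's qtd_list as in the real function:

        struct ehci_qtd *dummy = qh->dummy;
        dma_addr_t dma;
        __hc32 token;

        /* hold back the first qTD's token; keep it halted for now */
        token = qtd->hw_token;
        qtd->hw_token = HALT_BIT(ehci);

        /* old dummy takes over the first qTD's contents,
         * and the first qTD becomes the new dummy */
        dma = dummy->qtd_dma;
        *dummy = *qtd;
        dummy->qtd_dma = dma;

        list_del(&qtd->qtd_list);
        list_add(&dummy->qtd_list, qtd_list);
        list_splice_tail(qtd_list, &qh->qtd_list);

        ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
        qh->dummy = qtd;

        /* chain the new dummy onto the end, then go live */
        dma = qtd->qtd_dma;
        qtd = list_entry(qh->qtd_list.prev, struct ehci_qtd, qtd_list);
        qtd->hw_next = QTD_NEXT(ehci, dma);

        wmb();
        dummy->hw_token = token;        /* the HC may start here now */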

In submit_async():
  1100          struct ehci_hcd *ehci,
  1116          ehci_dbg(ehci,
  1125          spin_lock_irqsave (&ehci->lock, flags);
  1126          if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
  1130          rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
  1134          qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
  1136                  usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
  1145                  qh_link_async(ehci, qh);
  1147          spin_unlock_irqrestore (&ehci->lock, flags);
  1149          qtd_list_free (ehci, urb, qtd_list);

In ehci_submit_single_step_set_feature():
  1170          struct ehci_hcd *ehci = hcd_to_ehci(hcd);
  1183          qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
  1200                  qtd_fill(ehci, qtd, urb->setup_dma,
  1204                  submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
  1219          qtd_fill(ehci, qtd, buf, len, token, maxpacket);
  1225          qtd->hw_alt_next = EHCI_LIST_END(ehci);
  1232          qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
  1236          qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
  1240          qtd_fill(ehci, qtd, 0, 0, token | QTD_IOC, 0);
  1242          submit_async(ehci, urb, &qtd_list, GFP_KERNEL);
  1247          qtd_list_free(ehci, urb, head);

In single_unlink_async():
  1254  static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
  1260          list_add_tail(&qh->unlink_node, &ehci->async_unlink);
  1263          prev = ehci->async;
  1269          if (ehci->qh_scan_next == qh)
  1270                  ehci->qh_scan_next = qh->qh_next.qh;

In start_iaa_cycle():
  1273  static void start_iaa_cycle(struct ehci_hcd *ehci)
  1276          if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
  1277                  end_unlink_async(ehci);
  1280          } else if (ehci->rh_state == EHCI_RH_RUNNING &&
  1281                          !ehci->iaa_in_progress) {
  1286                  ehci_writel(ehci, ehci->command | CMD_IAAD,
  1287                                  &ehci->regs->command);
  1288                  ehci_readl(ehci, &ehci->regs->command);
  1289                  ehci->iaa_in_progress = true;
  1290                  ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
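
The CMD_IAAD write at line 1286 rings the "Interrupt on Async Advance" doorbell: after a QH is unlinked from the schedule, the driver must wait until the controller confirms it is no longer caching that QH before the memory may be reused (EHCI spec 4.8.2). The read-back at line 1288 only flushes the posted write; the acknowledgment arrives later as an STS_IAA interrupt. A sketch of the interrupt-handler side, reconstructed from memory of ehci_irq() in ehci-hcd.c (not part of this file, details simplified):

        /* in ehci_irq(), under ehci->lock, after reading/acking status */
        if (status & STS_IAA) {
                /* the doorbell was answered: stop the watchdog timer ... */
                ehci->enabled_hrtimer_events &=
                                ~BIT(EHCI_HRTIMER_IAA_WATCHDOG);

                /* ... and retire the QHs queued on ehci->async_unlink */
                end_iaa_cycle(ehci);
        }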

In end_iaa_cycle():
  1294  static void end_iaa_cycle(struct ehci_hcd *ehci)
  1296          if (ehci->has_synopsys_hc_bug)
  1297                  ehci_writel(ehci, (u32) ehci->async->qh_dma,
  1298                                  &ehci->regs->async_next);
  1301          ehci->iaa_in_progress = false;
  1303          end_unlink_async(ehci);

In end_unlink_async():
  1308  static void end_unlink_async(struct ehci_hcd *ehci)
  1313          if (list_empty(&ehci->async_unlink))
  1315          qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
  1322          early_exit = ehci->async_unlinking;
  1325          if (ehci->rh_state < EHCI_RH_RUNNING)
  1326                  list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle);
  1338                  list_move_tail(&qh->unlink_node, &ehci->async_idle);
  1348           * The EHCI spec (4.8.2) says that active QHs must not be removed
  1365          else if (qh->hw->hw_token & cpu_to_hc32(ehci, QTD_STS_HALT))
  1374          if (qh_current != ehci->old_current ||
  1375                          qh_token != ehci->old_token) {
  1376                  ehci->old_current = qh_current;
  1377                  ehci->old_token = qh_token;
  1378                  ehci_enable_event(ehci,
  1386          ehci->old_current = ~0;         /* Prepare for next QH */
  1389          if (!list_empty(&ehci->async_unlink))
  1390                  start_iaa_cycle(ehci);
  1400          ehci->async_unlinking = true;
  1401          while (!list_empty(&ehci->async_idle)) {
  1402                  qh = list_first_entry(&ehci->async_idle, struct ehci_qh,
  1410                  qh_completions(ehci, qh);
  1412                          ehci->rh_state == EHCI_RH_RUNNING)
  1413                          qh_link_async(ehci, qh);
  1414                  disable_async(ehci);
  1416          ehci->async_unlinking = false;

Forward declaration:
  1419  static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);

In unlink_empty_async():
  1421  static void unlink_empty_async(struct ehci_hcd *ehci)
  1428          for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
  1432                          if (qh->unlink_cycle != ehci->async_unlink_cycle)
  1438          if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
  1440                  start_unlink_async(ehci, qh_to_unlink);
  1446                  ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
  1447                  ++ehci->async_unlink_cycle;

In unlink_empty_async_suspended():
  1454  static void unlink_empty_async_suspended(struct ehci_hcd *ehci)
  1458          while (ehci->async->qh_next.qh) {
  1459                  qh = ehci->async->qh_next.qh;
  1461                  single_unlink_async(ehci, qh);

In start_unlink_async():
  1468  /* caller must own ehci->lock */
  1470  static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
  1476          single_unlink_async(ehci, qh);
  1477          start_iaa_cycle(ehci);

In scan_async():
  1482  static void scan_async (struct ehci_hcd *ehci)
  1487          ehci->qh_scan_next = ehci->async->qh_next.qh;
  1488          while (ehci->qh_scan_next) {
  1489                  qh = ehci->qh_scan_next;
  1490                  ehci->qh_scan_next = qh->qh_next.qh;
  1498                   * drops the lock.  That's why ehci->qh_scan_next
  1500                   * gets unlinked then ehci->qh_scan_next is adjusted
  1503                          temp = qh_completions(ehci, qh);
  1505                                  start_unlink_async(ehci, qh);
  1508                                  qh->unlink_cycle = ehci->async_unlink_cycle;
  1520          if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING &&
  1521                          !(ehci->enabled_hrtimer_events &
  1523                  ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
  1524                  ++ehci->async_unlink_cycle;
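
The cursor dance at lines 1487-1490 exists because qh_completions() can drop ehci->lock while giving URBs back, during which another QH may be unlinked; keeping the read-ahead pointer in ehci->qh_scan_next (and fixing it up in single_unlink_async(), line 1269) keeps the traversal safe. A condensed sketch of the whole scan, reconstructed around the matches above:

        struct ehci_qh *qh;
        bool check_unlinks_later = false;

        ehci->qh_scan_next = ehci->async->qh_next.qh;
        while (ehci->qh_scan_next) {
                qh = ehci->qh_scan_next;
                ehci->qh_scan_next = qh->qh_next.qh;    /* read ahead */

                if (!list_empty(&qh->qtd_list)) {
                        /* may temporarily drop ehci->lock */
                        if (qh_completions(ehci, qh))
                                start_unlink_async(ehci, qh);
                        else if (list_empty(&qh->qtd_list) &&
                                        qh->qh_state == QH_STATE_LINKED) {
                                /* now empty: unlink it a little later */
                                qh->unlink_cycle = ehci->async_unlink_cycle;
                                check_unlinks_later = true;
                        }
                }
        }

        if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING) {
                ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
                ++ehci->async_unlink_cycle;
        }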