Searched refs:TRBS_PER_SEGMENT (Results 1 – 19 of 19) sorted by relevance
126  if (TRBS_PER_SEGMENT > 40) {  in cdns2_raw_ring()
128  "\t\tTransfer ring %d too big\n", TRBS_PER_SEGMENT);  in cdns2_raw_ring()
133  for (i = 0; i < TRBS_PER_SEGMENT; ++i) {  in cdns2_raw_ring()

141  link_trb = (ring->trbs + (TRBS_PER_SEGMENT - 1));  in cdns2_alloc_tr_segment()
188  cdns2_ep_inc_trb(&ring->enqueue, &ring->pcs, TRBS_PER_SEGMENT);  in cdns2_ep_inc_enq()
194  cdns2_ep_inc_trb(&ring->dequeue, &ring->ccs, TRBS_PER_SEGMENT);  in cdns2_ep_inc_deq()
230  if (trb == (pep->ring.trbs + (TRBS_PER_SEGMENT - 1)))  in cdns2_next_trb()
332  if ((ring->enqueue + num_trbs) >= (TRBS_PER_SEGMENT - 1)) {  in cdns2_prepare_ring()
337  if (doorbell && dma_index == TRBS_PER_SEGMENT - 1) {  in cdns2_prepare_ring()
343  link_trb = ring->trbs + (TRBS_PER_SEGMENT - 1);  in cdns2_prepare_ring()
353  if (pep->type == USB_ENDPOINT_XFER_ISOC || TRBS_PER_SEGMENT > 2)  in cdns2_prepare_ring()
366  struct cdns2_trb *link_trb = pep->ring.trbs + (TRBS_PER_SEGMENT - 1);  in cdns2_dbg_request_trbs()
735  trb = &pep->ring.trbs[TRBS_PER_SEGMENT];  in cdns2_prepare_first_isoc_transfer()
[all …]

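The cdns2 hits above show a recurring pattern: the last slot of each segment (index TRBS_PER_SEGMENT - 1) is reserved for a link TRB, and the enqueue/dequeue helpers advance an index together with a producer/consumer cycle state (pcs/ccs) that toggles on wrap. A minimal standalone sketch of that increment follows; example_inc_trb() is an illustrative stand-in, not the driver's actual helper.

    #include <stdint.h>

    #define TRBS_PER_SEGMENT 600	/* cdns2 value, per the #define hit just below */

    /* Hypothetical stand-in for the cdns2_ep_inc_trb() pattern: advance an
     * index across the usable slots and toggle the cycle state when the
     * link TRB slot at TRBS_PER_SEGMENT - 1 is reached. */
    static void example_inc_trb(int *index, uint8_t *cs)
    {
    	(*index)++;
    	if (*index == TRBS_PER_SEGMENT - 1) {
    		*index = 0;
    		*cs ^= 1;	/* next lap around the ring flips the cycle bit */
    	}
    }

    int main(void)
    {
    	int enq = TRBS_PER_SEGMENT - 2;
    	uint8_t pcs = 1;

    	example_inc_trb(&enq, &pcs);	/* wraps: enq == 0, pcs == 0 */
    	return 0;
    }
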
417  #define TRBS_PER_SEGMENT 600  macro
431  #if TRBS_PER_SEGMENT < 2
455  #define TR_SEG_SIZE (TRB_SIZE * (TRBS_PER_SEGMENT + TRB_ISO_RESERVED))

445  (TRBS_PER_SEGMENT * 65) + CDNS2_MSG_MAX)
458  (TRBS_PER_SEGMENT * 65) + CDNS2_MSG_MAX))

56  for (i = 0; i < TRBS_PER_SEGMENT; i++)  in cdnsp_segment_alloc()
118  link = &prev->trbs[TRBS_PER_SEGMENT - 1].link;  in cdnsp_link_segments()
151  ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;  in cdnsp_link_rings()
154  ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=  in cdnsp_link_rings()
156  last->trbs[TRBS_PER_SEGMENT - 1].link.control |=  in cdnsp_link_rings()
318  ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;  in cdnsp_initialize_ring_info()
399  ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=  in cdnsp_ring_alloc()
432  num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /  in cdnsp_ring_expansion()
433  (TRBS_PER_SEGMENT - 1);  in cdnsp_ring_expansion()
1046  entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);  in cdnsp_alloc_erst()

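The expression at lines 432-433 above is plain ceiling division over the usable TRBs per segment: one slot per segment is consumed by the link TRB, so the ring needs ceil(num_trbs / (TRBS_PER_SEGMENT - 1)) segments. A worked standalone example, assuming the 256-TRB cdnsp segment size from the #define hit below:

    #include <stdio.h>

    #define TRBS_PER_SEGMENT 256	/* cdnsp value, per the #define hit below */

    int main(void)
    {
    	unsigned int num_trbs = 300;			/* TRBs a request needs */
    	unsigned int usable = TRBS_PER_SEGMENT - 1;	/* last slot is the link TRB */
    	unsigned int num_segs_needed = (num_trbs + usable - 1) / usable;

    	/* 300 TRBs over 255 usable slots -> 2 segments */
    	printf("%u TRBs -> %u segment(s)\n", num_trbs, num_segs_needed);
    	return 0;
    }
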
973  #define TRBS_PER_SEGMENT 600  macro
979  #if TRBS_PER_SEGMENT < 2
999  TRBS_PER_ISOC_SEGMENT : TRBS_PER_SEGMENT)
1015  #define TRB_RING_SIZE (TRB_SIZE * TRBS_PER_SEGMENT)

138 if (trb_per_sector > TRBS_PER_SEGMENT) { in cdns3_dbg_ring()
1145  #define TRBS_PER_SEGMENT 256  macro
1148  #define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT * 16)

76  if (trb < seg->trbs || segment_offset >= TRBS_PER_SEGMENT)  in cdnsp_trb_virt_to_dma()
94  return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];  in cdnsp_last_trb_on_seg()
550  temp_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1];  in cdnsp_trb_in_td()

438 GET_TRBS_PER_SEGMENT(priv_ep->type) > TRBS_PER_SEGMENT ?
1187  TRBS_PER_SEGMENT > 2)  in cdns3_ep_run_transfer()
1524  if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) {  in cdns3_trb_handled()
1615  TRBS_PER_SEGMENT == 2)  in cdns3_transfer_completed()

1372 sizeof(union cdnsp_trb) * (TRBS_PER_SEGMENT - 1)); in cdnsp_clear_cmd_ring()
61  for (i = 0; i < TRBS_PER_SEGMENT; i++)  in xhci_segment_alloc()
112  prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =  in xhci_link_segments()
116  val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);  in xhci_link_segments()
121  prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);  in xhci_link_segments()
148  ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control  in xhci_link_rings()
150  last->trbs[TRBS_PER_SEGMENT-1].link.control  in xhci_link_rings()
321  ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;  in xhci_initialize_ring_info()
403  ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=  in xhci_ring_alloc()
1788  entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);  in xhci_alloc_erst()

78  if (segment_offset >= TRBS_PER_SEGMENT)  in xhci_trb_virt_to_dma()
95  return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];  in last_trb_on_seg()
300  return ring->num_segs * (TRBS_PER_SEGMENT - 1);  in xhci_num_trbs_free()
305  last_on_seg = &enq_seg->trbs[TRBS_PER_SEGMENT - 1];  in xhci_num_trbs_free()
331  trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);  in xhci_ring_expansion_needed()
346  new_segs = 1 + (trbs_past_seg / (TRBS_PER_SEGMENT - 1));  in xhci_ring_expansion_needed()
2085  &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);  in trb_in_td()
3064  if (event_loop++ > TRBS_PER_SEGMENT / 2) {  in xhci_handle_events()

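The accounting at xhci_initialize_ring_info() line 321 follows from the same reservation: each segment contributes TRBS_PER_SEGMENT - 1 usable TRBs (the link TRB is excluded), and initialization keeps one further slot empty so a full ring's enqueue never catches its dequeue. A worked sketch, assuming the 256-TRB xhci segment size from the xhci.h hits further down:

    #include <stdio.h>

    #define TRBS_PER_SEGMENT 256	/* xhci value, per the xhci.h hits below */

    int main(void)
    {
    	unsigned int num_segs = 2;

    	/* One link TRB per segment is unusable for transfers... */
    	unsigned int usable = num_segs * (TRBS_PER_SEGMENT - 1);

    	/* ...and one extra slot stays free to tell "full" from "empty". */
    	unsigned int num_trbs_free = usable - 1;

    	/* prints: 2 segments -> 509 free TRBs at init */
    	printf("%u segments -> %u free TRBs at init\n", num_segs, num_trbs_free);
    	return 0;
    }
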
407  erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);  in dbc_erst_alloc()
468  union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];  in xhci_dbc_ring_alloc()
813  if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {  in inc_evt_deq()

1243  #define TRBS_PER_SEGMENT 256  macro
1245  #define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
1246  #define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16)

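With the 16-byte TRB size encoded in TRB_SEGMENT_SIZE above, a 256-TRB segment comes out to exactly one 4 KiB page, which a compile-time check can confirm (a standalone sketch, not part of the header):

    #include <assert.h>

    #define TRBS_PER_SEGMENT 256
    #define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT * 16)

    /* 256 TRBs at 16 bytes each: one segment fills exactly one 4 KiB page. */
    static_assert(TRB_SEGMENT_SIZE == 4096, "one page per ring segment");

    int main(void) { return 0; }
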
204 for (i = 0; i < TRBS_PER_SEGMENT; i++) { in xhci_ring_dump_segment()
791  sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));  in xhci_clear_command_ring()
792  seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=  in xhci_clear_command_ring()

401  if (ring->enqueue >= &ring->segment->trbs[TRBS_PER_SEGMENT - 1]) {  in xdbc_queue_trb()
824  if (xdbc.evt_ring.dequeue == &xdbc.evt_seg.trbs[TRBS_PER_SEGMENT]) {  in xdbc_handle_events()

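The two early-DbC checks wrap at different boundaries: xdbc_queue_trb() stops at index TRBS_PER_SEGMENT - 1 because the transfer ring's last slot holds a link TRB, while event rings carry no link TRBs, so xdbc_handle_events() compares against the one-past-the-end address before wrapping its dequeue pointer. A sketch of the two predicates under that reading, with hypothetical helper names:

    #include <stdbool.h>

    #define TRBS_PER_SEGMENT 256

    struct trb { unsigned int field[4]; };	/* 16-byte TRB stand-in (4-byte int assumed) */

    /* Transfer ring: the slot at TRBS_PER_SEGMENT - 1 is the link TRB,
     * so enqueue must wrap before reaching it. */
    static bool past_last_transfer_trb(const struct trb *seg, const struct trb *enq)
    {
    	return enq >= &seg[TRBS_PER_SEGMENT - 1];
    }

    /* Event ring: no link TRB, so dequeue runs to one past the end
     * before wrapping back to the start of the segment. */
    static bool past_last_event_trb(const struct trb *seg, const struct trb *deq)
    {
    	return deq == &seg[TRBS_PER_SEGMENT];
    }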