Lines matching refs: tq (struct vmxnet3_tx_queue references in the vmxnet3 driver, drivers/net/vmxnet3/vmxnet3_drv.c). Each entry shows the source line number, the matching code, and the enclosing function; "argument" and "local" mark whether tq is a parameter or a local variable there.

116 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)  in vmxnet3_tq_stopped()  argument
118 return tq->stopped; in vmxnet3_tq_stopped()
123 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_start() argument
125 tq->stopped = false; in vmxnet3_tq_start()
126 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue); in vmxnet3_tq_start()
131 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_wake() argument
133 tq->stopped = false; in vmxnet3_tq_wake()
134 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue)); in vmxnet3_tq_wake()
139 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_stop() argument
141 tq->stopped = true; in vmxnet3_tq_stop()
142 tq->num_stop++; in vmxnet3_tq_stop()
143 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue)); in vmxnet3_tq_stop()
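
The four helpers above (lines 116-143) keep a per-queue stopped flag in step with the netdev subqueue state, deriving the subqueue index by pointer arithmetic, since the tx queues live in a contiguous array inside the adapter. A minimal userspace sketch of that indexing idiom follows; all names are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

#define NUM_TXQ 4

struct txq { bool stopped; unsigned int num_stop; };
struct nic { struct txq tx_queue[NUM_TXQ]; };

static void txq_stop(struct nic *n, struct txq *q)
{
    q->stopped = true;
    q->num_stop++;
    /* q - n->tx_queue recovers the queue's index within the array,
     * the same arithmetic the driver feeds to netif_stop_subqueue() */
    printf("stop subqueue %ld\n", (long)(q - n->tx_queue));
}

int main(void)
{
    struct nic n = { 0 };
    txq_stop(&n, &n.tx_queue[2]);   /* prints: stop subqueue 2 */
    return 0;
}
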
157 vmxnet3_apply_timestamp(struct vmxnet3_tx_queue *tq, u16 rate) in vmxnet3_apply_timestamp() argument
161 if (tq->tsPktCount == 1) { in vmxnet3_apply_timestamp()
163 tq->tsPktCount = rate; in vmxnet3_apply_timestamp()
166 tq->tsPktCount--; in vmxnet3_apply_timestamp()
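
vmxnet3_apply_timestamp reads as a 1-in-rate sampler: tsPktCount is seeded to 1 (see vmxnet3_create_queues at line 3411), so the first packet is stamped and the counter then reloads to rate, stamping every rate-th packet after that. A standalone sketch of the countdown, assuming illustrative names:

#include <stdbool.h>
#include <stdio.h>

static unsigned int ts_pkt_count = 1;  /* seeded to 1: first packet samples */

static bool should_timestamp(unsigned int rate)
{
    if (ts_pkt_count == 1) {
        ts_pkt_count = rate;           /* reload the countdown */
        return true;
    }
    ts_pkt_count--;
    return false;
}

int main(void)
{
    /* with rate 3, packets 0, 3, 6 are stamped */
    for (int i = 0; i < 8; i++)
        printf("pkt %d: %s\n", i, should_timestamp(3) ? "timestamp" : "skip");
    return 0;
}
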
384 vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq, in vmxnet3_unmap_pkt() argument
393 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp); in vmxnet3_unmap_pkt()
394 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1); in vmxnet3_unmap_pkt()
396 tbi = &tq->buf_info[eop_idx]; in vmxnet3_unmap_pkt()
399 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size); in vmxnet3_unmap_pkt()
401 while (tq->tx_ring.next2comp != eop_idx) { in vmxnet3_unmap_pkt()
402 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp, in vmxnet3_unmap_pkt()
410 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); in vmxnet3_unmap_pkt()
427 vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, in vmxnet3_tq_tx_complete() argument
437 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; in vmxnet3_tq_tx_complete()
438 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) { in vmxnet3_tq_tx_complete()
445 &gdesc->tcd), tq, adapter->pdev, in vmxnet3_tq_tx_complete()
448 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring); in vmxnet3_tq_tx_complete()
449 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; in vmxnet3_tq_tx_complete()
455 spin_lock(&tq->tx_lock); in vmxnet3_tq_tx_complete()
456 if (unlikely(vmxnet3_tq_stopped(tq, adapter) && in vmxnet3_tq_tx_complete()
457 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) > in vmxnet3_tq_tx_complete()
458 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) && in vmxnet3_tq_tx_complete()
460 vmxnet3_tq_wake(tq, adapter); in vmxnet3_tq_tx_complete()
462 spin_unlock(&tq->tx_lock); in vmxnet3_tq_tx_complete()
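
vmxnet3_tq_tx_complete consumes completion descriptors for as long as their generation field matches comp_ring.gen; the generation flips each time next2proc wraps, so stale entries left over from the previous lap never match. The lock-protected tail then wakes a stopped queue once enough tx descriptors have been reclaimed. A minimal sketch of just the generation handshake, with illustrative types in place of Vmxnet3_TxCompDesc:

#include <stdio.h>

#define RING_SIZE 8

struct comp_desc { unsigned int gen; int data; };
struct comp_ring {
    struct comp_desc base[RING_SIZE];
    unsigned int next2proc;
    unsigned int gen;
};

static void process_completions(struct comp_ring *ring)
{
    struct comp_desc *d = &ring->base[ring->next2proc];

    while (d->gen == ring->gen) {       /* entry published by the device */
        printf("completed: %d\n", d->data);
        if (++ring->next2proc == RING_SIZE) {
            ring->next2proc = 0;
            ring->gen ^= 1;             /* flip the expected gen on wrap */
        }
        d = &ring->base[ring->next2proc];
    }
}

int main(void)
{
    struct comp_ring ring = { .gen = 1 };
    ring.base[0] = (struct comp_desc){ .gen = 1, .data = 42 }; /* "device" write */
    process_completions(&ring);         /* prints "completed: 42", then stops */
    return 0;
}
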
469 vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq, in vmxnet3_tq_cleanup() argument
479 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) { in vmxnet3_tq_cleanup()
482 tbi = tq->buf_info + tq->tx_ring.next2comp; in vmxnet3_tq_cleanup()
493 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); in vmxnet3_tq_cleanup()
500 for (i = 0; i < tq->tx_ring.size; i++) in vmxnet3_tq_cleanup()
501 BUG_ON(tq->buf_info[i].map_type != VMXNET3_MAP_NONE); in vmxnet3_tq_cleanup()
503 tq->tx_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_cleanup()
504 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; in vmxnet3_tq_cleanup()
506 tq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_cleanup()
507 tq->comp_ring.next2proc = 0; in vmxnet3_tq_cleanup()
512 vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, in vmxnet3_tq_destroy() argument
515 if (tq->tx_ring.base) { in vmxnet3_tq_destroy()
516 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size * in vmxnet3_tq_destroy()
518 tq->tx_ring.base, tq->tx_ring.basePA); in vmxnet3_tq_destroy()
519 tq->tx_ring.base = NULL; in vmxnet3_tq_destroy()
521 if (tq->data_ring.base) { in vmxnet3_tq_destroy()
523 tq->data_ring.size * tq->txdata_desc_size, in vmxnet3_tq_destroy()
524 tq->data_ring.base, tq->data_ring.basePA); in vmxnet3_tq_destroy()
525 tq->data_ring.base = NULL; in vmxnet3_tq_destroy()
527 if (tq->ts_ring.base) { in vmxnet3_tq_destroy()
529 tq->tx_ring.size * tq->tx_ts_desc_size, in vmxnet3_tq_destroy()
530 tq->ts_ring.base, tq->ts_ring.basePA); in vmxnet3_tq_destroy()
531 tq->ts_ring.base = NULL; in vmxnet3_tq_destroy()
533 if (tq->comp_ring.base) { in vmxnet3_tq_destroy()
534 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size * in vmxnet3_tq_destroy()
536 tq->comp_ring.base, tq->comp_ring.basePA); in vmxnet3_tq_destroy()
537 tq->comp_ring.base = NULL; in vmxnet3_tq_destroy()
539 kfree(tq->buf_info); in vmxnet3_tq_destroy()
540 tq->buf_info = NULL; in vmxnet3_tq_destroy()
556 vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, in vmxnet3_tq_init() argument
562 memset(tq->tx_ring.base, 0, tq->tx_ring.size * in vmxnet3_tq_init()
564 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; in vmxnet3_tq_init()
565 tq->tx_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_init()
567 memset(tq->data_ring.base, 0, in vmxnet3_tq_init()
568 tq->data_ring.size * tq->txdata_desc_size); in vmxnet3_tq_init()
570 if (tq->ts_ring.base) in vmxnet3_tq_init()
571 memset(tq->ts_ring.base, 0, in vmxnet3_tq_init()
572 tq->tx_ring.size * tq->tx_ts_desc_size); in vmxnet3_tq_init()
575 memset(tq->comp_ring.base, 0, tq->comp_ring.size * in vmxnet3_tq_init()
577 tq->comp_ring.next2proc = 0; in vmxnet3_tq_init()
578 tq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_init()
581 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size); in vmxnet3_tq_init()
582 for (i = 0; i < tq->tx_ring.size; i++) in vmxnet3_tq_init()
583 tq->buf_info[i].map_type = VMXNET3_MAP_NONE; in vmxnet3_tq_init()
590 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, in vmxnet3_tq_create() argument
593 BUG_ON(tq->tx_ring.base || tq->data_ring.base || in vmxnet3_tq_create()
594 tq->comp_ring.base || tq->buf_info); in vmxnet3_tq_create()
596 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
597 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc), in vmxnet3_tq_create()
598 &tq->tx_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
599 if (!tq->tx_ring.base) { in vmxnet3_tq_create()
604 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
605 tq->data_ring.size * tq->txdata_desc_size, in vmxnet3_tq_create()
606 &tq->data_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
607 if (!tq->data_ring.base) { in vmxnet3_tq_create()
612 if (tq->tx_ts_desc_size != 0) { in vmxnet3_tq_create()
613 tq->ts_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
614 tq->tx_ring.size * tq->tx_ts_desc_size, in vmxnet3_tq_create()
615 &tq->ts_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
616 if (!tq->ts_ring.base) { in vmxnet3_tq_create()
618 tq->tx_ts_desc_size = 0; in vmxnet3_tq_create()
621 tq->ts_ring.base = NULL; in vmxnet3_tq_create()
624 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
625 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc), in vmxnet3_tq_create()
626 &tq->comp_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
627 if (!tq->comp_ring.base) { in vmxnet3_tq_create()
632 tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]), in vmxnet3_tq_create()
635 if (!tq->buf_info) in vmxnet3_tq_create()
641 vmxnet3_tq_destroy(tq, adapter); in vmxnet3_tq_create()
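
vmxnet3_tq_create allocates the rings in sequence and, on any hard failure, unwinds through vmxnet3_tq_destroy, which is safe on a partially built queue because each free above (lines 515-540) is gated on the base pointer and clears it afterwards. The optional timestamp ring is the exception: if its allocation fails, the feature is simply disabled (tx_ts_desc_size = 0) rather than failing the queue. A hedged userspace sketch of that shape, with calloc/free standing in for dma_alloc_coherent/dma_free_coherent and made-up descriptor sizes:

#include <stdlib.h>

struct txq {
    void *tx_ring, *data_ring, *ts_ring, *comp_ring;
    size_t ts_desc_size;
};

static void txq_destroy(struct txq *q)
{
    free(q->tx_ring);   q->tx_ring = NULL;   /* free(NULL) is a no-op, */
    free(q->data_ring); q->data_ring = NULL; /* so destroy is idempotent */
    free(q->ts_ring);   q->ts_ring = NULL;
    free(q->comp_ring); q->comp_ring = NULL;
}

static int txq_create(struct txq *q, size_t n)
{
    if (!(q->tx_ring = calloc(n, 16)))       /* illustrative desc sizes */
        goto err;
    if (!(q->data_ring = calloc(n, 128)))
        goto err;
    if (q->ts_desc_size && !(q->ts_ring = calloc(n, q->ts_desc_size)))
        q->ts_desc_size = 0;                 /* optional ring: degrade, don't fail */
    if (!(q->comp_ring = calloc(n, 16)))
        goto err;
    return 0;
err:
    txq_destroy(q);                          /* safe on partial construction */
    return -1;
}

int main(void)
{
    struct txq q = { .ts_desc_size = 16 };
    int ret = txq_create(&q, 64);
    txq_destroy(&q);
    return ret;
}
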
783 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, in vmxnet3_map_pkt() argument
795 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
797 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
802 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA + in vmxnet3_map_pkt()
803 tq->tx_ring.next2fill * in vmxnet3_map_pkt()
804 tq->txdata_desc_size); in vmxnet3_map_pkt()
808 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
813 tq->tx_ring.next2fill, in vmxnet3_map_pkt()
816 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
819 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
836 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
846 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
847 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); in vmxnet3_map_pkt()
855 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), in vmxnet3_map_pkt()
857 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
858 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
871 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
888 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
889 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); in vmxnet3_map_pkt()
897 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), in vmxnet3_map_pkt()
899 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
900 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
911 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; in vmxnet3_map_pkt()
912 if (tq->tx_ts_desc_size != 0) { in vmxnet3_map_pkt()
913 ctx->ts_txd = (struct Vmxnet3_TxTSDesc *)((u8 *)tq->ts_ring.base + in vmxnet3_map_pkt()
914 tbi->sop_idx * tq->tx_ts_desc_size); in vmxnet3_map_pkt()
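
A detail worth noticing in vmxnet3_map_pkt: the SOP (first) descriptor is written with an inverted generation bit (tx_ring.gen ^ 0x1, line 795) while the rest of the chain uses the live gen, so the device ignores the packet until the driver flips the SOP gen back, which in the driver happens later in the xmit path behind a write barrier. A sketch of that publish-last trick, with an assumed gen-bit position and ring wrap elided:

#include <stdint.h>
#include <stdio.h>

#define GEN_SHIFT 14                  /* illustrative bit position */
#define GEN_BIT   (1u << GEN_SHIFT)

struct txd { uint64_t addr; uint32_t dw2; };

/* Fill an n-buffer chain; ring wrap and DMA mapping are elided. */
static void map_pkt(struct txd *ring, unsigned int *next2fill,
                    unsigned int ring_gen, const uint64_t *bufs, int n)
{
    struct txd *sop = &ring[*next2fill];
    uint32_t dw2 = (ring_gen ^ 0x1u) << GEN_SHIFT;  /* SOP stays invisible */
    int i;

    for (i = 0; i < n; i++) {
        struct txd *d = &ring[(*next2fill)++];
        d->addr = bufs[i];
        d->dw2  = dw2;
        dw2 = ring_gen << GEN_SHIFT;  /* later descriptors use the live gen */
    }
    /* In the driver this flip happens later in the xmit path, after a
     * write barrier: only now does the device see a complete chain. */
    sop->dw2 ^= GEN_BIT;
}

int main(void)
{
    struct txd ring[8] = { 0 };
    uint64_t bufs[3] = { 0x1000, 0x2000, 0x3000 };
    unsigned int next2fill = 0;

    map_pkt(ring, &next2fill, 1, bufs, 3);
    printf("sop dw2 = 0x%x\n", ring[0].dw2);  /* gen bit now matches the ring */
    return 0;
}
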
951 vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, in vmxnet3_parse_hdr() argument
1020 tq->txdata_desc_size, in vmxnet3_parse_hdr()
1024 if (skb->len <= tq->txdata_desc_size) in vmxnet3_parse_hdr()
1032 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) { in vmxnet3_parse_hdr()
1033 tq->stats.oversized_hdr++; in vmxnet3_parse_hdr()
1054 vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, in vmxnet3_copy_hdr() argument
1060 tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base + in vmxnet3_copy_hdr()
1061 tq->tx_ring.next2fill * in vmxnet3_copy_hdr()
1062 tq->txdata_desc_size); in vmxnet3_copy_hdr()
1067 ctx->copy_size, tq->tx_ring.next2fill); in vmxnet3_copy_hdr()
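
vmxnet3_parse_hdr picks a copy size, normally just the protocol headers, or the whole frame when skb->len fits in one data-ring slot (line 1024), rejecting anything larger than txdata_desc_size as an oversized header; vmxnet3_copy_hdr then copies that prefix into the pre-mapped data ring at the next2fill slot, so short frames need no extra DMA mapping. A minimal sketch of the slot copy, with an assumed slot size:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define TXDATA_DESC_SIZE 128  /* illustrative per-slot size of the data ring */

static bool copy_hdr(uint8_t *data_ring, unsigned int next2fill,
                     const uint8_t *pkt, size_t copy_size)
{
    if (copy_size > TXDATA_DESC_SIZE)
        return false;          /* the driver bumps stats.oversized_hdr here */
    memcpy(data_ring + (size_t)next2fill * TXDATA_DESC_SIZE, pkt, copy_size);
    return true;
}

int main(void)
{
    uint8_t data_ring[8 * TXDATA_DESC_SIZE];
    uint8_t pkt[64] = { 0 };

    return copy_hdr(data_ring, 3, pkt, sizeof(pkt)) ? 0 : 1;
}
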
1134 vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, in vmxnet3_tq_xmit() argument
1159 tq->stats.drop_tso++; in vmxnet3_tq_xmit()
1162 tq->stats.copy_skb_header++; in vmxnet3_tq_xmit()
1169 tq->stats.drop_too_many_frags++; in vmxnet3_tq_xmit()
1172 tq->stats.linearized++; in vmxnet3_tq_xmit()
1177 tq->stats.drop_too_many_frags++; in vmxnet3_tq_xmit()
1193 tq->stats.drop_too_many_frags++; in vmxnet3_tq_xmit()
1196 tq->stats.linearized++; in vmxnet3_tq_xmit()
1203 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter); in vmxnet3_tq_xmit()
1210 tq->stats.drop_oversized_hdr++; in vmxnet3_tq_xmit()
1218 tq->stats.drop_oversized_hdr++; in vmxnet3_tq_xmit()
1224 tq->stats.drop_hdr_inspect_err++; in vmxnet3_tq_xmit()
1228 spin_lock_irqsave(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1230 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { in vmxnet3_tq_xmit()
1231 tq->stats.tx_ring_full++; in vmxnet3_tq_xmit()
1235 tq->tx_ring.next2comp, tq->tx_ring.next2fill); in vmxnet3_tq_xmit()
1237 vmxnet3_tq_stop(tq, adapter); in vmxnet3_tq_xmit()
1238 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1243 vmxnet3_copy_hdr(skb, tq, &ctx, adapter); in vmxnet3_tq_xmit()
1246 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter)) in vmxnet3_tq_xmit()
1260 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred); in vmxnet3_tq_xmit()
1307 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts); in vmxnet3_tq_xmit()
1315 if (tq->tx_ts_desc_size != 0 && in vmxnet3_tq_xmit()
1317 if (vmxnet3_apply_timestamp(tq, adapter->latencyConf->sampleRate)) { in vmxnet3_tq_xmit()
1342 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr), in vmxnet3_tq_xmit()
1345 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1347 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) { in vmxnet3_tq_xmit()
1348 tq->shared->txNumDeferred = 0; in vmxnet3_tq_xmit()
1350 adapter->tx_prod_offset + tq->qid * 8, in vmxnet3_tq_xmit()
1351 tq->tx_ring.next2fill); in vmxnet3_tq_xmit()
1357 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1359 tq->stats.drop_total++; in vmxnet3_tq_xmit()
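
The tail of vmxnet3_tq_xmit batches the producer-register write, which is expensive in a VM because it traps to the hypervisor: each transmit adds its packet count (more than one for TSO, one per segment) to the shared txNumDeferred, and the doorbell is written only once that total crosses txThreshold. A sketch of the batching, with stand-ins for the shared area and the register write:

#include <stdint.h>
#include <stdio.h>

struct txq_shared { uint32_t tx_num_deferred; uint32_t tx_threshold; };

static void write_producer_reg(unsigned int next2fill)
{
    /* stands in for the BAR0 register write that traps to the hypervisor */
    printf("doorbell: producer -> %u\n", next2fill);
}

static void xmit_done(struct txq_shared *sh, unsigned int next2fill,
                      uint32_t num_pkts)
{
    sh->tx_num_deferred += num_pkts;    /* >1 for TSO (one per segment) */
    if (sh->tx_num_deferred >= sh->tx_threshold) {
        sh->tx_num_deferred = 0;
        write_producer_reg(next2fill);  /* ring the doorbell only now */
    }
}

int main(void)
{
    struct txq_shared sh = { .tx_threshold = 4 };
    unsigned int fill = 0;

    for (int i = 0; i < 10; i++)
        xmit_done(&sh, ++fill, 1);      /* doorbell fires at packets 4 and 8 */
    return 0;
}
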
2363 struct vmxnet3_tx_queue *tq = in vmxnet3_poll_rx_only() local
2365 vmxnet3_tq_tx_complete(tq, adapter); in vmxnet3_poll_rx_only()
2388 struct vmxnet3_tx_queue *tq = data; in vmxnet3_msix_tx() local
2389 struct vmxnet3_adapter *adapter = tq->adapter; in vmxnet3_msix_tx()
2392 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx); in vmxnet3_msix_tx()
2402 vmxnet3_tq_tx_complete(tq, adapter); in vmxnet3_msix_tx()
2404 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx); in vmxnet3_msix_tx()
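
The MSI-X tx handler follows the usual mask, reap, re-arm shape: the queue's interrupt is disabled on entry, completions are processed, and the interrupt is re-enabled so the device can fire again for work that arrived meanwhile. A bare sketch of that shape (the real handler also depends on the adapter's interrupt mask mode, which is ignored here):

#include <stdio.h>

struct txq { int intr_idx; };

static void disable_intr(int idx) { printf("mask intr %d\n", idx); }
static void enable_intr(int idx)  { printf("unmask intr %d\n", idx); }
static void reap_tx_completions(struct txq *q) { (void)q; /* walk comp ring */ }

static void msix_tx_handler(struct txq *q)
{
    disable_intr(q->intr_idx);   /* vmxnet3_disable_intr() in the driver */
    reap_tx_completions(q);      /* vmxnet3_tq_tx_complete() */
    enable_intr(q->intr_idx);    /* re-arm; device may fire again */
}

int main(void)
{
    struct txq q = { .intr_idx = 1 };
    msix_tx_handler(&q);
    return 0;
}
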
2903 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; in vmxnet3_setup_driver_shared() local
2906 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA); in vmxnet3_setup_driver_shared()
2907 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA); in vmxnet3_setup_driver_shared()
2908 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA); in vmxnet3_setup_driver_shared()
2910 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); in vmxnet3_setup_driver_shared()
2911 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); in vmxnet3_setup_driver_shared()
2912 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size); in vmxnet3_setup_driver_shared()
2913 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); in vmxnet3_setup_driver_shared()
2915 tqc->intrIdx = tq->comp_ring.intr_idx; in vmxnet3_setup_driver_shared()
2918 tqtsc->txTSRingBasePA = cpu_to_le64(tq->ts_ring.basePA); in vmxnet3_setup_driver_shared()
2919 tqtsc->txTSRingDescSize = cpu_to_le16(tq->tx_ts_desc_size); in vmxnet3_setup_driver_shared()
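
vmxnet3_setup_driver_shared publishes each queue's ring addresses and sizes to the device through a shared config area, converting every field with cpu_to_le32/cpu_to_le64 because the device ABI is little-endian whatever the host byte order is. A userspace sketch of that fixed-endianness handoff, with htole32/htole64 standing in for the kernel helpers and an invented struct layout:

#include <endian.h>
#include <stdint.h>

struct tx_queue_conf {            /* device-visible: all fields LE */
    uint64_t tx_ring_base_pa;
    uint32_t tx_ring_size;
    uint32_t comp_ring_size;
};

static void fill_conf(struct tx_queue_conf *c, uint64_t base_pa,
                      uint32_t ring_size, uint32_t comp_size)
{
    c->tx_ring_base_pa = htole64(base_pa);  /* cpu_to_le64 in the kernel */
    c->tx_ring_size    = htole32(ring_size);
    c->comp_ring_size  = htole32(comp_size);
}

int main(void)
{
    struct tx_queue_conf c;
    fill_conf(&c, 0x12340000ull, 4096, 4096);
    return 0;
}
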
3401 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; in vmxnet3_create_queues() local
3402 tq->tx_ring.size = tx_ring_size; in vmxnet3_create_queues()
3403 tq->data_ring.size = tx_ring_size; in vmxnet3_create_queues()
3404 tq->comp_ring.size = tx_ring_size; in vmxnet3_create_queues()
3405 tq->txdata_desc_size = txdata_desc_size; in vmxnet3_create_queues()
3406 tq->shared = &adapter->tqd_start[i].ctrl; in vmxnet3_create_queues()
3407 tq->stopped = true; in vmxnet3_create_queues()
3408 tq->adapter = adapter; in vmxnet3_create_queues()
3409 tq->qid = i; in vmxnet3_create_queues()
3410 tq->tx_ts_desc_size = adapter->tx_ts_desc_size; in vmxnet3_create_queues()
3411 tq->tsPktCount = 1; in vmxnet3_create_queues()
3412 err = vmxnet3_tq_create(tq, adapter); in vmxnet3_create_queues()