Lines Matching +full:1 +full:q (Chelsio cxgb3 SGE driver, sge.c)
66 #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
94 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
95 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
141 dma_addr_t addr[MAX_SKB_FRAGS + 1];
148 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
154 #if SGE_NUM_GENBITS == 1
155 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
160 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
165 # error "SGE_NUM_GENBITS must be 1 or 2"
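The comment at line 148 gives the flits-to-descriptors formula that the flit_desc_map table (lines 154-165) precomputes. The standalone sketch below only illustrates the arithmetic: TOY_WR_FLITS is a placeholder value, not the driver's real constant, and the interpretation in the comment (each continuation descriptor losing one flit to the repeated WR header, as suggested by write_wr_hdr_sgl() around lines 1145-1151) is my reading of the code.

#include <stdio.h>

/* Placeholder WR size in flits, chosen only to illustrate the rounding;
 * the real value depends on SGE_NUM_GENBITS and the WR header layout. */
#define TOY_WR_FLITS 15u

/* The formula from the sge.c comment: the first descriptor carries up to
 * WR_FLITS flits, and each continuation descriptor appears to lose one
 * flit to the repeated WR header (write_wr_hdr_sgl() writes a 1-flit
 * header on continuations), hence the (WR_FLITS - 1) divisor.
 * Only meaningful for flits >= 2. */
static unsigned int toy_flits_to_desc(unsigned int flits)
{
        return 1 + (flits - 2) / (TOY_WR_FLITS - 1);
}

int main(void)
{
        /* Print the table this formula would generate, the way flit_desc_map
         * precomputes it in sge.c; breakpoints here are only as good as the
         * placeholder TOY_WR_FLITS above. */
        for (unsigned int flits = 2; flits <= 3 * TOY_WR_FLITS; flits++)
                printf("%u flits -> %u descriptor(s)\n",
                       flits, toy_flits_to_desc(flits));
        return 0;
}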
169 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q) in rspq_to_qset() argument
171 return container_of(q, struct sge_qset, rspq); in rspq_to_qset()
174 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx) in txq_to_qset() argument
176 return container_of(q, struct sge_qset, txq[qidx]); in txq_to_qset()
182 * @q: the response queue to replenish
189 const struct sge_rspq *q, unsigned int credits) in refill_rspq() argument
193 V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); in refill_rspq()
205 return 1; in need_skb_unmap()
214 * @q: the Tx queue containing Tx descriptors for the packet
233 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, in unmap_skb() argument
237 struct tx_sw_desc *d = &q->sdesc[cidx]; in unmap_skb()
240 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit]; in unmap_skb()
246 j = 1; in unmap_skb()
249 curflit = d->sflit + 1 + j; in unmap_skb()
256 j ^= 1; in unmap_skb()
266 d = cidx + 1 == q->size ? q->sdesc : d + 1; in unmap_skb()
269 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */ in unmap_skb()
276 * @q: the Tx queue to reclaim descriptors from
282 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q, in free_tx_desc() argument
287 unsigned int cidx = q->cidx; in free_tx_desc()
290 q->cntxt_id >= FW_TUNNEL_SGEEC_START; in free_tx_desc()
292 d = &q->sdesc[cidx]; in free_tx_desc()
296 unmap_skb(d->skb, q, cidx, pdev); in free_tx_desc()
303 if (++cidx == q->size) { in free_tx_desc()
305 d = q->sdesc; in free_tx_desc()
308 q->cidx = cidx; in free_tx_desc()
314 * @q: the Tx queue to reclaim completed descriptors from
322 struct sge_txq *q, in reclaim_completed_tx() argument
325 unsigned int reclaim = q->processed - q->cleaned; in reclaim_completed_tx()
329 free_tx_desc(adapter, q, reclaim); in reclaim_completed_tx()
330 q->cleaned += reclaim; in reclaim_completed_tx()
331 q->in_use -= reclaim; in reclaim_completed_tx()
333 return q->processed - q->cleaned; in reclaim_completed_tx()
338 * @q: the Tx queue
342 static inline int should_restart_tx(const struct sge_txq *q) in should_restart_tx() argument
344 unsigned int r = q->processed - q->cleaned; in should_restart_tx()
346 return q->in_use - r < (q->size >> 1); in should_restart_tx()
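reclaim_completed_tx() and should_restart_tx() above work off three counters: processed (descriptors the hardware has completed), cleaned (descriptors software has already reclaimed), and in_use (descriptors posted but not yet reclaimed). A minimal userspace model of that bookkeeping, with illustrative names, is sketched below; it is not the driver code, and unlike the real reclaim path it ignores the per-call chunk limit.

#include <assert.h>
#include <stdio.h>

/* Toy model of the cxgb3 Tx reclaim bookkeeping (illustrative only). */
struct toy_txq {
        unsigned int size;      /* ring size in descriptors              */
        unsigned int in_use;    /* posted but not yet reclaimed          */
        unsigned int processed; /* completed by the "hardware"           */
        unsigned int cleaned;   /* already reclaimed by software         */
};

/* Mirrors reclaim_completed_tx(): free everything completed so far and
 * report how much completed work is still unreclaimed (always 0 here,
 * since the toy reclaims without a chunk limit). */
static unsigned int toy_reclaim(struct toy_txq *q)
{
        unsigned int reclaim = q->processed - q->cleaned;

        q->cleaned += reclaim;
        q->in_use -= reclaim;
        return q->processed - q->cleaned;
}

/* Mirrors should_restart_tx(): restart once at least half the ring would
 * be free after reclaiming the already-completed descriptors. */
static int toy_should_restart(const struct toy_txq *q)
{
        unsigned int r = q->processed - q->cleaned;

        return q->in_use - r < (q->size >> 1);
}

int main(void)
{
        struct toy_txq q = { .size = 1024 };

        q.in_use = 1000;                  /* nearly full, queue would be stopped   */
        q.processed = 600;                /* hardware has finished 600 descriptors */
        assert(toy_should_restart(&q));   /* 1000 - 600 < 512                      */
        toy_reclaim(&q);
        printf("in_use after reclaim: %u\n", q.in_use);   /* 400 */
        return 0;
}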
349 static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q, in clear_rx_desc() argument
352 if (q->use_pages && d->pg_chunk.page) { in clear_rx_desc()
356 q->alloc_size, DMA_FROM_DEVICE); in clear_rx_desc()
362 q->buf_size, DMA_FROM_DEVICE); in clear_rx_desc()
371 * @q: the SGE free list to clean up
376 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q) in free_rx_bufs() argument
378 unsigned int cidx = q->cidx; in free_rx_bufs()
380 while (q->credits--) { in free_rx_bufs()
381 struct rx_sw_desc *d = &q->sdesc[cidx]; in free_rx_bufs()
384 clear_rx_desc(pdev, q, d); in free_rx_bufs()
385 if (++cidx == q->size) in free_rx_bufs()
389 if (q->pg_chunk.page) { in free_rx_bufs()
390 __free_pages(q->pg_chunk.page, q->order); in free_rx_bufs()
391 q->pg_chunk.page = NULL; in free_rx_bufs()
438 static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, in alloc_pg_chunk() argument
442 if (!q->pg_chunk.page) { in alloc_pg_chunk()
445 q->pg_chunk.page = alloc_pages(gfp, order); in alloc_pg_chunk()
446 if (unlikely(!q->pg_chunk.page)) in alloc_pg_chunk()
448 q->pg_chunk.va = page_address(q->pg_chunk.page); in alloc_pg_chunk()
449 q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) - in alloc_pg_chunk()
451 q->pg_chunk.offset = 0; in alloc_pg_chunk()
452 mapping = dma_map_page(&adapter->pdev->dev, q->pg_chunk.page, in alloc_pg_chunk()
453 0, q->alloc_size, DMA_FROM_DEVICE); in alloc_pg_chunk()
455 __free_pages(q->pg_chunk.page, order); in alloc_pg_chunk()
456 q->pg_chunk.page = NULL; in alloc_pg_chunk()
459 q->pg_chunk.mapping = mapping; in alloc_pg_chunk()
461 sd->pg_chunk = q->pg_chunk; in alloc_pg_chunk()
465 q->pg_chunk.offset += q->buf_size; in alloc_pg_chunk()
466 if (q->pg_chunk.offset == (PAGE_SIZE << order)) in alloc_pg_chunk()
467 q->pg_chunk.page = NULL; in alloc_pg_chunk()
469 q->pg_chunk.va += q->buf_size; in alloc_pg_chunk()
470 get_page(q->pg_chunk.page); in alloc_pg_chunk()
474 *sd->pg_chunk.p_cnt = 1; in alloc_pg_chunk()
476 *sd->pg_chunk.p_cnt += 1; in alloc_pg_chunk()
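alloc_pg_chunk() above carves one higher-order page allocation into fixed-size receive chunks: q->pg_chunk caches the current page, offset/va advance by buf_size per buffer handed out, get_page() takes a reference per chunk, and a counter (p_cnt) kept in the reserved tail of the allocation tracks the chunks outstanding. The userspace sketch below models only the carving arithmetic and the in-page counter; sizes and names are illustrative, there is no DMA mapping, and nothing is freed (as I read sge.c, the driver unmaps and releases the page once that count drains).

#include <stdio.h>
#include <stdlib.h>

/* Illustrative sizes, not the driver's constants. */
#define TOY_PAGE_SIZE  4096u
#define TOY_ORDER      1u                       /* two pages per allocation       */
#define TOY_ALLOC_SIZE (TOY_PAGE_SIZE << TOY_ORDER)
#define TOY_BUF_SIZE   2048u                    /* chunk handed to each Rx buffer */

struct toy_chunk_src {
        unsigned char *page;      /* current backing allocation, or NULL          */
        unsigned int *p_cnt;      /* chunk count kept in the allocation's tail    */
        unsigned int offset;      /* next free chunk offset                       */
};

/* Hand out the next chunk, allocating a fresh "page" when needed.
 * Mirrors the offset += buf_size / drop-page-at-end logic above. */
static unsigned char *toy_get_chunk(struct toy_chunk_src *s)
{
        unsigned char *chunk;

        if (!s->page) {
                s->page = malloc(TOY_ALLOC_SIZE);
                if (!s->page)
                        return NULL;
                /* counter lives at the very end, like p_cnt behind SGE_PG_RSVD */
                s->p_cnt = (unsigned int *)(s->page + TOY_ALLOC_SIZE -
                                            sizeof(*s->p_cnt));
                *s->p_cnt = 0;
                s->offset = 0;
        }
        chunk = s->page + s->offset;
        (*s->p_cnt)++;                           /* analogue of get_page()/p_cnt   */
        s->offset += TOY_BUF_SIZE;
        if (s->offset + TOY_BUF_SIZE > TOY_ALLOC_SIZE - sizeof(*s->p_cnt))
                s->page = NULL;                  /* no room for another full chunk */
        return chunk;
}

int main(void)
{
        struct toy_chunk_src src = { 0 };

        for (int i = 0; i < 5; i++)
                printf("chunk %d at %p\n", i, (void *)toy_get_chunk(&src));
        return 0;
}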
481 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) in ring_fl_db() argument
483 if (q->pend_cred >= q->credits / 4) { in ring_fl_db()
484 q->pend_cred = 0; in ring_fl_db()
486 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); in ring_fl_db()
493 * @q: the free-list to refill
501 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) in refill_fl() argument
503 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; in refill_fl()
504 struct rx_desc *d = &q->desc[q->pidx]; in refill_fl()
511 if (q->use_pages) { in refill_fl()
512 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp, in refill_fl()
513 q->order))) { in refill_fl()
514 nomem: q->alloc_failed++; in refill_fl()
520 add_one_rx_chunk(mapping, d, q->gen); in refill_fl()
522 q->buf_size - SGE_PG_RSVD, in refill_fl()
527 struct sk_buff *skb = alloc_skb(q->buf_size, gfp); in refill_fl()
533 err = add_one_rx_buf(buf_start, q->buf_size, d, sd, in refill_fl()
534 q->gen, adap->pdev); in refill_fl()
536 clear_rx_desc(adap->pdev, q, sd); in refill_fl()
543 if (++q->pidx == q->size) { in refill_fl()
544 q->pidx = 0; in refill_fl()
545 q->gen ^= 1; in refill_fl()
546 sd = q->sdesc; in refill_fl()
547 d = q->desc; in refill_fl()
552 q->credits += count; in refill_fl()
553 q->pend_cred += count; in refill_fl()
554 ring_fl_db(adap, q); in refill_fl()
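refill_fl() above batches its doorbell writes: every posted buffer bumps both credits and pend_cred, and ring_fl_db() (line 483) only writes the kernel doorbell register once pend_cred has grown to a quarter of the posted credits. The tiny model below, with made-up numbers, only counts how many MMIO writes that policy would issue; unlike the real free list, it never consumes buffers, so credits only grows.

#include <stdio.h>

/* Toy model of free-list doorbell batching (illustrative only). */
struct toy_fl {
        unsigned int credits;    /* buffers currently posted to the ring */
        unsigned int pend_cred;  /* posted since the last doorbell       */
        unsigned int doorbells;  /* MMIO writes issued                   */
};

static void toy_ring_fl_db(struct toy_fl *fl)
{
        if (fl->pend_cred >= fl->credits / 4) {
                fl->pend_cred = 0;
                fl->doorbells++;        /* stand-in for the A_SG_KDOORBELL write */
        }
}

static void toy_refill(struct toy_fl *fl, unsigned int n)
{
        fl->credits += n;
        fl->pend_cred += n;
        toy_ring_fl_db(fl);
}

int main(void)
{
        struct toy_fl fl = { 0 };

        for (int i = 0; i < 64; i++)
                toy_refill(&fl, 16);
        printf("posted %u buffers with %u doorbell writes\n",
               fl.credits, fl.doorbells);
        return 0;
}

The point of the threshold is simply to trade a little latency in telling the hardware about new buffers for far fewer expensive register writes on a busy receive path.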
568 * @q: the SGE free list
574 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q, in recycle_rx_buf() argument
577 struct rx_desc *from = &q->desc[idx]; in recycle_rx_buf()
578 struct rx_desc *to = &q->desc[q->pidx]; in recycle_rx_buf()
580 q->sdesc[q->pidx] = q->sdesc[idx]; in recycle_rx_buf()
584 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen)); in recycle_rx_buf()
585 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen)); in recycle_rx_buf()
587 if (++q->pidx == q->size) { in recycle_rx_buf()
588 q->pidx = 0; in recycle_rx_buf()
589 q->gen ^= 1; in recycle_rx_buf()
592 q->credits++; in recycle_rx_buf()
593 q->pend_cred++; in recycle_rx_buf()
594 ring_fl_db(adap, q); in recycle_rx_buf()
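The `if (++pidx == size) { pidx = 0; gen ^= 1; }` pattern seen in refill_fl() and recycle_rx_buf() above (and in every Tx producer below) is the SGE's generation-bit scheme: each descriptor is stamped with the producer's current gen value, and that value flips on every wrap, so the consumer can tell a freshly written descriptor from a stale one left over from the previous lap without a separate valid flag. A minimal sketch of the idea, outside any driver context, follows; in a real hardware ring the gen bit is made visible last, behind a memory barrier.

#include <stdio.h>

#define TOY_RING_SIZE 4

struct toy_desc {
        unsigned int payload;
        unsigned int gen;         /* generation bit stamped by the producer  */
};

struct toy_ring {
        struct toy_desc d[TOY_RING_SIZE];
        unsigned int pidx, pgen;  /* producer index and current generation   */
        unsigned int cidx, cgen;  /* consumer index and expected generation  */
};

static void toy_produce(struct toy_ring *r, unsigned int v)
{
        r->d[r->pidx].payload = v;
        r->d[r->pidx].gen = r->pgen;
        if (++r->pidx == TOY_RING_SIZE) {
                r->pidx = 0;
                r->pgen ^= 1;             /* flip on wrap, as in q->gen ^= 1 */
        }
}

/* Returns 1 and fills *v if a new entry is available, 0 otherwise. */
static int toy_consume(struct toy_ring *r, unsigned int *v)
{
        if (r->d[r->cidx].gen != r->cgen) /* stale entry from the previous lap */
                return 0;
        *v = r->d[r->cidx].payload;
        if (++r->cidx == TOY_RING_SIZE) {
                r->cidx = 0;
                r->cgen ^= 1;
        }
        return 1;
}

int main(void)
{
        /* Both sides start at gen 1, matching the driver's q->...gen = 1;
         * the zero-filled descriptors are therefore "stale" until written. */
        struct toy_ring r = { .pgen = 1, .cgen = 1 };
        unsigned int v;

        for (unsigned int i = 0; i < 3; i++)
                toy_produce(&r, i);
        while (toy_consume(&r, &v))
                printf("got %u\n", v);            /* 0 1 2 */
        for (unsigned int i = 3; i < 6; i++)
                toy_produce(&r, i);               /* wraps; gen flips */
        while (toy_consume(&r, &v))
                printf("got %u\n", v);            /* 3 4 5 */
        return 0;
}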
637 * @q: the queue set
643 static void t3_reset_qset(struct sge_qset *q) in t3_reset_qset() argument
645 if (q->adap && in t3_reset_qset()
646 !(q->adap->flags & NAPI_INIT)) { in t3_reset_qset()
647 memset(q, 0, sizeof(*q)); in t3_reset_qset()
651 q->adap = NULL; in t3_reset_qset()
652 memset(&q->rspq, 0, sizeof(q->rspq)); in t3_reset_qset()
653 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET); in t3_reset_qset()
654 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); in t3_reset_qset()
655 q->txq_stopped = 0; in t3_reset_qset()
656 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ in t3_reset_qset()
657 q->rx_reclaim_timer.function = NULL; in t3_reset_qset()
658 q->nomem = 0; in t3_reset_qset()
659 napi_free_frags(&q->napi); in t3_reset_qset()
666 * @q: the queue set
672 static void t3_free_qset(struct adapter *adapter, struct sge_qset *q) in t3_free_qset() argument
678 if (q->fl[i].desc) { in t3_free_qset()
680 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id); in t3_free_qset()
682 free_rx_bufs(pdev, &q->fl[i]); in t3_free_qset()
683 kfree(q->fl[i].sdesc); in t3_free_qset()
685 q->fl[i].size * in t3_free_qset()
686 sizeof(struct rx_desc), q->fl[i].desc, in t3_free_qset()
687 q->fl[i].phys_addr); in t3_free_qset()
691 if (q->txq[i].desc) { in t3_free_qset()
693 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); in t3_free_qset()
695 if (q->txq[i].sdesc) { in t3_free_qset()
696 free_tx_desc(adapter, &q->txq[i], in t3_free_qset()
697 q->txq[i].in_use); in t3_free_qset()
698 kfree(q->txq[i].sdesc); in t3_free_qset()
701 q->txq[i].size * in t3_free_qset()
703 q->txq[i].desc, q->txq[i].phys_addr); in t3_free_qset()
704 __skb_queue_purge(&q->txq[i].sendq); in t3_free_qset()
707 if (q->rspq.desc) { in t3_free_qset()
709 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id); in t3_free_qset()
712 q->rspq.size * sizeof(struct rsp_desc), in t3_free_qset()
713 q->rspq.desc, q->rspq.phys_addr); in t3_free_qset()
716 t3_reset_qset(q); in t3_free_qset()
730 qs->fl[1].cntxt_id = 2 * id + 1; in init_qset_cntxt()
747 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */ in sgl_len()
748 return (3 * n) / 2 + (n & 1); in sgl_len()
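sgl_len() converts a count of scatter/gather entries into flits (8-byte words): as I read the sg_ent layout, two address/length pairs pack into each 3-flit entry and a trailing odd entry needs 2 flits, hence 3 * (n / 2) + 2 * (n & 1), which the code folds into (3 * n) / 2 + (n & 1). The standalone check below cross-verifies the closed form against an explicit packing count; it is not driver code.

#include <assert.h>
#include <stdio.h>

/* Closed form used by sge.c: flits needed for an n-entry gather list. */
static unsigned int sgl_len(unsigned int n)
{
        return (3 * n) / 2 + (n & 1);
}

/* Explicit packing: full pairs cost 3 flits (2 lengths + 2 addresses),
 * a leftover single entry costs 2 flits (its length word + 1 address). */
static unsigned int sgl_len_by_packing(unsigned int n)
{
        return 3 * (n / 2) + 2 * (n % 2);
}

int main(void)
{
        for (unsigned int n = 0; n <= 18; n++) {
                assert(sgl_len(n) == sgl_len_by_packing(n));
                printf("n=%2u entries -> %2u flits\n", n, sgl_len(n));
        }
        return 0;
}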
807 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1), in get_packet()
824 * @q: the queue
840 struct sge_rspq *q, unsigned int len, in get_packet_pg() argument
848 newskb = skb = q->pg_skb; in get_packet_pg()
863 q->rx_recycle_buf++; in get_packet_pg()
867 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres))) in get_packet_pg()
943 return 1; in calc_tx_descs()
945 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2; in calc_tx_descs()
989 dma_unmap_single(&pdev->dev, addr[-1], skb_headlen(skb), in map_skb()
1024 j ^= 1; in write_sgl()
1036 * @q: the Tx queue
1045 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q) in check_ring_tx_db() argument
1048 clear_bit(TXQ_LAST_PKT_DB, &q->flags); in check_ring_tx_db()
1049 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) { in check_ring_tx_db()
1050 set_bit(TXQ_LAST_PKT_DB, &q->flags); in check_ring_tx_db()
1052 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in check_ring_tx_db()
1057 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in check_ring_tx_db()
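check_ring_tx_db() above, together with the TXQ_RUNNING and TXQ_LAST_PKT_DB flags defined at lines 94-95, coalesces Tx doorbells: the bell is written only when the queue transitions from idle to running (the test_and_set_bit on TXQ_RUNNING), and TXQ_LAST_PKT_DB records whether the most recent packet actually rang it so the completion path knows if a follow-up doorbell is owed. The unconditional write at line 1057 is the fallback branch whose guard is not captured by this listing. The sketch below models only the transition logic with plain flags; it is single-threaded and illustrative, whereas the driver relies on atomic bit operations.

#include <stdio.h>

/* Toy single-threaded model of coalesced Tx doorbells. */
struct toy_txq_db {
        int running;          /* fetch engine believed active (TXQ_RUNNING)         */
        int last_pkt_db;      /* last posted packet rang the bell (TXQ_LAST_PKT_DB) */
        unsigned int doorbells;
};

static void toy_post_packet(struct toy_txq_db *q)
{
        q->last_pkt_db = 0;
        if (!q->running) {               /* idle -> running: ring the bell once */
                q->running = 1;
                q->last_pkt_db = 1;
                q->doorbells++;
        }
        /* else: the engine is already running and will see the new descriptor */
}

/* Called when the hardware reports the queue has gone idle again. */
static void toy_queue_idle(struct toy_txq_db *q)
{
        q->running = 0;
}

int main(void)
{
        struct toy_txq_db q = { 0 };

        for (int i = 0; i < 5; i++)
                toy_post_packet(&q);     /* burst of 5 packets, 1 doorbell */
        toy_queue_idle(&q);
        toy_post_packet(&q);             /* next packet rings again        */
        printf("6 packets posted, %u doorbells\n", q.doorbells);
        return 0;
}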
1064 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen); in wr_gen2()
1074 * @q: the SGE Tx queue
1089 const struct sge_txq *q, in write_wr_hdr_sgl() argument
1096 struct tx_sw_desc *sd = &q->sdesc[pidx]; in write_wr_hdr_sgl()
1105 if (likely(ndesc == 1)) { in write_wr_hdr_sgl()
1106 sd->eop = 1; in write_wr_hdr_sgl()
1107 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) | in write_wr_hdr_sgl()
1118 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) | in write_wr_hdr_sgl()
1136 if (++pidx == q->size) { in write_wr_hdr_sgl()
1138 gen ^= 1; in write_wr_hdr_sgl()
1139 d = q->desc; in write_wr_hdr_sgl()
1140 sd = q->sdesc; in write_wr_hdr_sgl()
1145 wrp->wr_hi = htonl(V_WR_DATATYPE(1) | in write_wr_hdr_sgl()
1146 V_WR_SGLSFLT(1)) | wr_hi; in write_wr_hdr_sgl()
1148 sgl_flits + 1)) | in write_wr_hdr_sgl()
1151 flits = 1; in write_wr_hdr_sgl()
1153 sd->eop = 1; in write_wr_hdr_sgl()
1169 * @q: the Tx queue
1179 struct sge_txq *q, unsigned int ndesc, in write_tx_pkt_wr() argument
1183 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; in write_tx_pkt_wr()
1184 struct tx_desc *d = &q->desc[pidx]; in write_tx_pkt_wr()
1215 q->sdesc[pidx].skb = NULL; in write_tx_pkt_wr()
1228 V_WR_TID(q->token)); in write_tx_pkt_wr()
1237 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; in write_tx_pkt_wr()
1240 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, in write_tx_pkt_wr()
1242 htonl(V_WR_TID(q->token))); in write_tx_pkt_wr()
1246 struct sge_qset *qs, struct sge_txq *q) in t3_stop_tx_queue() argument
1250 q->stops++; in t3_stop_tx_queue()
1268 struct sge_txq *q; in t3_eth_xmit() local
1269 dma_addr_t addr[MAX_SKB_FRAGS + 1]; in t3_eth_xmit()
1282 q = &qs->txq[TXQ_ETH]; in t3_eth_xmit()
1285 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in t3_eth_xmit()
1287 credits = q->size - q->in_use; in t3_eth_xmit()
1291 t3_stop_tx_queue(txq, qs, q); in t3_eth_xmit()
1294 dev->name, q->cntxt_id & 7); in t3_eth_xmit()
1306 q->in_use += ndesc; in t3_eth_xmit()
1307 if (unlikely(credits - ndesc < q->stop_thres)) { in t3_eth_xmit()
1308 t3_stop_tx_queue(txq, qs, q); in t3_eth_xmit()
1310 if (should_restart_tx(q) && in t3_eth_xmit()
1312 q->restarts++; in t3_eth_xmit()
1317 gen = q->gen; in t3_eth_xmit()
1318 q->unacked += ndesc; in t3_eth_xmit()
1319 compl = (q->unacked & 8) << (S_WR_COMPL - 3); in t3_eth_xmit()
1320 q->unacked &= 7; in t3_eth_xmit()
1321 pidx = q->pidx; in t3_eth_xmit()
1322 q->pidx += ndesc; in t3_eth_xmit()
1323 if (q->pidx >= q->size) { in t3_eth_xmit()
1324 q->pidx -= q->size; in t3_eth_xmit()
1325 q->gen ^= 1; in t3_eth_xmit()
1363 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); in t3_eth_xmit()
1364 check_ring_tx_db(adap, q); in t3_eth_xmit()
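In the t3_eth_xmit() fragment above, q->unacked accumulates descriptors posted since the last requested completion; `compl = (q->unacked & 8) << (S_WR_COMPL - 3)` turns bit 3 of that count into the work request's completion-request flag, and `q->unacked &= 7` keeps the remainder, so a completion is asked for roughly once every 8 descriptors rather than per packet. A small standalone illustration of the bit trick follows; TOY_S_WR_COMPL is a placeholder bit position, not the firmware's real S_WR_COMPL.

#include <stdio.h>

/* Placeholder bit position for the "request completion" flag. */
#define TOY_S_WR_COMPL 21

/* Returns the completion flag to OR into the WR header and updates the
 * running unacked-descriptor count, as t3_eth_xmit() does.  The bit test
 * works because each WR adds only a few descriptors, so the running count
 * never jumps past 15 without bit 3 being seen. */
static unsigned int toy_completion_flag(unsigned int *unacked, unsigned int ndesc)
{
        unsigned int compl;

        *unacked += ndesc;
        compl = (*unacked & 8) << (TOY_S_WR_COMPL - 3);  /* bit 3 -> compl bit */
        *unacked &= 7;                                   /* keep count mod 8   */
        return compl;
}

int main(void)
{
        unsigned int unacked = 0, sent = 0;

        for (int i = 0; i < 20; i++) {          /* 20 packets, 2 descriptors each */
                unsigned int flag = toy_completion_flag(&unacked, 2);

                sent += 2;
                if (flag)
                        printf("completion requested after %u descriptors\n", sent);
        }
        return 0;
}

Batching completion requests this way keeps the processed counter (used by the reclaim path above) advancing while limiting how often the hardware has to report back.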
1387 memcpy(&to[1], &from[1], len - sizeof(*from)); in write_imm()
1389 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from)); in write_imm()
1403 * @q: the send queue
1413 * Returns 0 if enough descriptors are available, 1 if there aren't
1418 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q, in check_desc_avail() argument
1422 if (unlikely(!skb_queue_empty(&q->sendq))) { in check_desc_avail()
1423 addq_exit:__skb_queue_tail(&q->sendq, skb); in check_desc_avail()
1424 return 1; in check_desc_avail()
1426 if (unlikely(q->size - q->in_use < ndesc)) { in check_desc_avail()
1427 struct sge_qset *qs = txq_to_qset(q, qid); in check_desc_avail()
1432 if (should_restart_tx(q) && in check_desc_avail()
1436 q->stops++; in check_desc_avail()
1444 * @q: the SGE control Tx queue
1450 static inline void reclaim_completed_tx_imm(struct sge_txq *q) in reclaim_completed_tx_imm() argument
1452 unsigned int reclaim = q->processed - q->cleaned; in reclaim_completed_tx_imm()
1454 q->in_use -= reclaim; in reclaim_completed_tx_imm()
1455 q->cleaned += reclaim; in reclaim_completed_tx_imm()
1466 * @q: the control queue
1473 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q, in ctrl_xmit() argument
1480 WARN_ON(1); in ctrl_xmit()
1486 wrp->wr_lo = htonl(V_WR_TID(q->token)); in ctrl_xmit()
1488 spin_lock(&q->lock); in ctrl_xmit()
1489 again:reclaim_completed_tx_imm(q); in ctrl_xmit()
1491 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL); in ctrl_xmit()
1493 if (ret == 1) { in ctrl_xmit()
1494 spin_unlock(&q->lock); in ctrl_xmit()
1500 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); in ctrl_xmit()
1502 q->in_use++; in ctrl_xmit()
1503 if (++q->pidx >= q->size) { in ctrl_xmit()
1504 q->pidx = 0; in ctrl_xmit()
1505 q->gen ^= 1; in ctrl_xmit()
1507 spin_unlock(&q->lock); in ctrl_xmit()
1510 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in ctrl_xmit()
1525 struct sge_txq *q = &qs->txq[TXQ_CTRL]; in restart_ctrlq() local
1527 spin_lock(&q->lock); in restart_ctrlq()
1528 again:reclaim_completed_tx_imm(q); in restart_ctrlq()
1530 while (q->in_use < q->size && in restart_ctrlq()
1531 (skb = __skb_dequeue(&q->sendq)) != NULL) { in restart_ctrlq()
1533 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); in restart_ctrlq()
1535 if (++q->pidx >= q->size) { in restart_ctrlq()
1536 q->pidx = 0; in restart_ctrlq()
1537 q->gen ^= 1; in restart_ctrlq()
1539 q->in_use++; in restart_ctrlq()
1542 if (!skb_queue_empty(&q->sendq)) { in restart_ctrlq()
1546 if (should_restart_tx(q) && in restart_ctrlq()
1549 q->stops++; in restart_ctrlq()
1552 spin_unlock(&q->lock); in restart_ctrlq()
1555 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in restart_ctrlq()
1610 *p++ = be64_to_cpu(sgl->addr[1]); in setup_deferred_unmapping()
1620 * @q: the Tx queue
1630 struct sge_txq *q, unsigned int pidx, in write_ofld_wr() argument
1636 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; in write_ofld_wr()
1637 struct tx_desc *d = &q->desc[pidx]; in write_ofld_wr()
1640 q->sdesc[pidx].skb = NULL; in write_ofld_wr()
1648 memcpy(&d->flit[1], &from[1], in write_ofld_wr()
1652 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; in write_ofld_wr()
1661 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, in write_ofld_wr()
1677 return 1; /* packet fits as immediate data */ in calc_tx_descs_ofld()
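calc_tx_descs_ofld() (like calc_tx_descs() at line 943) short-circuits to a single descriptor when the packet "fits as immediate data", i.e. when the whole payload can simply be copied after the WR header into the descriptor, which is what write_imm() at lines 1387-1389 does. The sketch below only shows the shape of that decision; the capacity constant and the linear-data requirement are assumptions for illustration, not the driver's exact test.

#include <stdio.h>

/* Assumed capacity of one work request for inlined ("immediate") data,
 * purely for illustration; the driver derives this from its WR layout. */
#define TOY_IMM_DATA_MAX 112u

struct toy_pkt {
        unsigned int len;        /* total payload bytes      */
        unsigned int nfrags;     /* scatter/gather fragments */
};

/* One descriptor if the payload can be copied into the WR itself;
 * otherwise a gather list is needed (its cost is not computed here).
 * The toy also insists on linear data, since inlining fragments would
 * require extra copies. */
static int toy_fits_as_immediate(const struct toy_pkt *p)
{
        return p->nfrags == 0 && p->len <= TOY_IMM_DATA_MAX;
}

int main(void)
{
        struct toy_pkt small = { .len = 64,   .nfrags = 0 };
        struct toy_pkt large = { .len = 1500, .nfrags = 1 };

        printf("64B linear packet : %s\n",
               toy_fits_as_immediate(&small) ? "immediate" : "gather list");
        printf("1500B fragmented  : %s\n",
               toy_fits_as_immediate(&large) ? "immediate" : "gather list");
        return 0;
}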
1689 * @q: the Tx offload queue
1694 static int ofld_xmit(struct adapter *adap, struct sge_txq *q, in ofld_xmit() argument
1700 spin_lock(&q->lock); in ofld_xmit()
1701 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in ofld_xmit()
1703 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD); in ofld_xmit()
1705 if (ret == 1) { in ofld_xmit()
1707 spin_unlock(&q->lock); in ofld_xmit()
1715 spin_unlock(&q->lock); in ofld_xmit()
1719 gen = q->gen; in ofld_xmit()
1720 q->in_use += ndesc; in ofld_xmit()
1721 pidx = q->pidx; in ofld_xmit()
1722 q->pidx += ndesc; in ofld_xmit()
1723 if (q->pidx >= q->size) { in ofld_xmit()
1724 q->pidx -= q->size; in ofld_xmit()
1725 q->gen ^= 1; in ofld_xmit()
1727 spin_unlock(&q->lock); in ofld_xmit()
1729 write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); in ofld_xmit()
1730 check_ring_tx_db(adap, q); in ofld_xmit()
1745 struct sge_txq *q = &qs->txq[TXQ_OFLD]; in restart_offloadq() local
1750 spin_lock(&q->lock); in restart_offloadq()
1751 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in restart_offloadq()
1753 while ((skb = skb_peek(&q->sendq)) != NULL) { in restart_offloadq()
1757 if (unlikely(q->size - q->in_use < ndesc)) { in restart_offloadq()
1761 if (should_restart_tx(q) && in restart_offloadq()
1764 q->stops++; in restart_offloadq()
1772 gen = q->gen; in restart_offloadq()
1773 q->in_use += ndesc; in restart_offloadq()
1774 pidx = q->pidx; in restart_offloadq()
1775 q->pidx += ndesc; in restart_offloadq()
1777 if (q->pidx >= q->size) { in restart_offloadq()
1778 q->pidx -= q->size; in restart_offloadq()
1779 q->gen ^= 1; in restart_offloadq()
1781 __skb_unlink(skb, &q->sendq); in restart_offloadq()
1782 spin_unlock(&q->lock); in restart_offloadq()
1784 write_ofld_wr(adap, skb, q, pidx, gen, ndesc, in restart_offloadq()
1786 spin_lock(&q->lock); in restart_offloadq()
1788 spin_unlock(&q->lock); in restart_offloadq()
1791 set_bit(TXQ_RUNNING, &q->flags); in restart_offloadq()
1792 set_bit(TXQ_LAST_PKT_DB, &q->flags); in restart_offloadq()
1797 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in restart_offloadq()
1805 * set is carried in bits 1-3 in the packet's priority.
1809 return skb->priority >> 1; in queue_set()
1821 return skb->priority & 1; in is_ctrl_pkt()
1831 * should be sent as regular or control, bits 1-3 select the queue set.
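queue_set() and is_ctrl_pkt() above decode the routing that the offload layer packs into skb->priority: bit 0 says whether the packet goes to the control queue or the regular offload queue, and bits 1-3 select the queue set. A pair of standalone encode/decode helpers matching that layout is sketched below; the encode side is my reconstruction of the convention, while the decode side mirrors the two one-liners in sge.c.

#include <assert.h>
#include <stdio.h>

/* Encode: pack the qset index and the control flag the way
 * queue_set()/is_ctrl_pkt() expect to find them. (Reconstruction.) */
static unsigned int toy_encode_priority(unsigned int qset, int is_ctrl)
{
        return (qset << 1) | (is_ctrl ? 1 : 0);
}

/* Decode: mirrors queue_set() and is_ctrl_pkt() in sge.c. */
static unsigned int toy_queue_set(unsigned int priority)
{
        return priority >> 1;
}

static int toy_is_ctrl_pkt(unsigned int priority)
{
        return priority & 1;
}

int main(void)
{
        for (unsigned int qset = 0; qset < 8; qset++)
                for (int ctrl = 0; ctrl <= 1; ctrl++) {
                        unsigned int prio = toy_encode_priority(qset, ctrl);

                        assert(toy_queue_set(prio) == qset);
                        assert(toy_is_ctrl_pkt(prio) == ctrl);
                }
        printf("priority encoding round-trips for qsets 0-7\n");
        return 0;
}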
1846 * @q: the SGE response queue
1853 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) in offload_enqueue() argument
1855 int was_empty = skb_queue_empty(&q->rx_queue); in offload_enqueue()
1857 __skb_queue_tail(&q->rx_queue, skb); in offload_enqueue()
1860 struct sge_qset *qs = rspq_to_qset(q); in offload_enqueue()
1869 * @q: the SGE response queue that assembled the bundle
1876 struct sge_rspq *q, in deliver_partial_bundle() argument
1880 q->offload_bundles++; in deliver_partial_bundle()
1899 struct sge_rspq *q = &qs->rspq; in ofld_poll() local
1908 spin_lock_irq(&q->lock); in ofld_poll()
1910 skb_queue_splice_init(&q->rx_queue, &queue); in ofld_poll()
1913 spin_unlock_irq(&q->lock); in ofld_poll()
1916 spin_unlock_irq(&q->lock); in ofld_poll()
1928 q->offload_bundles++; in ofld_poll()
1936 spin_lock_irq(&q->lock); in ofld_poll()
1937 skb_queue_splice(&queue, &q->rx_queue); in ofld_poll()
1938 spin_unlock_irq(&q->lock); in ofld_poll()
1940 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); in ofld_poll()
2038 arp_ptr = (unsigned char *)(arp + 1); in cxgb3_arp_process()
2281 * @q: the response queue
2287 const struct sge_rspq *q) in is_new_response() argument
2289 return (r->intr_gen & F_RSPD_GEN2) == q->gen; in is_new_response()
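is_new_response() is the consumer-side half of the generation-bit scheme: a response descriptor is valid only while its GEN2 bit matches the queue's current gen, which the consumer flips each time cidx wraps (process_responses(), lines 2412-2415), and consumed entries are handed back to the hardware in batches of a quarter of the ring via refill_rspq() (lines 2419-2421). The sketch below models that consume-and-batch-credit loop with a faked hardware producer; the names are illustrative.

#include <stdio.h>

#define TOY_RSPQ_SIZE 8

struct toy_rsp {
        unsigned int data;
        unsigned int gen;            /* written by the (faked) hardware     */
};

struct toy_rspq {
        struct toy_rsp desc[TOY_RSPQ_SIZE];
        unsigned int cidx, gen;      /* consumer index, expected generation */
        unsigned int credits;        /* consumed entries not yet returned   */
        unsigned int credit_writes;  /* stand-in for refill_rspq() writes   */
};

static int toy_is_new_response(const struct toy_rspq *q)
{
        return q->desc[q->cidx].gen == q->gen;
}

static void toy_process_responses(struct toy_rspq *q)
{
        while (toy_is_new_response(q)) {
                /* ... handle q->desc[q->cidx].data here ... */
                if (++q->cidx == TOY_RSPQ_SIZE) {
                        q->cidx = 0;
                        q->gen ^= 1;                 /* wrapped: flip expected gen */
                }
                if (++q->credits >= TOY_RSPQ_SIZE / 4) {
                        q->credit_writes++;          /* return credits in batches  */
                        q->credits = 0;
                }
        }
}

/* Fake hardware: publish n responses starting at *pidx with generation *pgen. */
static void toy_hw_publish(struct toy_rspq *q, unsigned int *pidx,
                           unsigned int *pgen, unsigned int n)
{
        while (n--) {
                q->desc[*pidx].data = n;
                q->desc[*pidx].gen = *pgen;
                if (++*pidx == TOY_RSPQ_SIZE) {
                        *pidx = 0;
                        *pgen ^= 1;
                }
        }
}

int main(void)
{
        struct toy_rspq q = { .gen = 1 };
        unsigned int pidx = 0, pgen = 1;         /* gen starts at 1, as in sge.c */

        toy_hw_publish(&q, &pidx, &pgen, 6);
        toy_process_responses(&q);
        toy_hw_publish(&q, &pidx, &pgen, 6);     /* wraps the ring */
        toy_process_responses(&q);
        printf("credits returned with %u batched writes\n", q.credit_writes);
        return 0;
}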
2292 static inline void clear_rspq_bufstate(struct sge_rspq * const q) in clear_rspq_bufstate() argument
2294 q->pg_skb = NULL; in clear_rspq_bufstate()
2295 q->rx_recycle_buf = 0; in clear_rspq_bufstate()
2325 struct sge_rspq *q = &qs->rspq; in process_responses() local
2326 struct rsp_desc *r = &q->desc[q->cidx]; in process_responses()
2332 q->next_holdoff = q->holdoff_tmr; in process_responses()
2334 while (likely(budget_left && is_new_response(r, q))) { in process_responses()
2355 q->async_notif++; in process_responses()
2360 q->next_holdoff = NOMEM_INTR_DELAY; in process_responses()
2361 q->nomem++; in process_responses()
2366 q->imm_data++; in process_responses()
2373 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; in process_responses()
2386 skb = get_packet_pg(adap, fl, q, in process_responses()
2390 q->pg_skb = skb; in process_responses()
2397 q->rx_drops++; in process_responses()
2404 q->pure_rsps++; in process_responses()
2412 if (unlikely(++q->cidx == q->size)) { in process_responses()
2413 q->cidx = 0; in process_responses()
2414 q->gen ^= 1; in process_responses()
2415 r = q->desc; in process_responses()
2419 if (++q->credits >= (q->size / 4)) { in process_responses()
2420 refill_rspq(adap, q, q->credits); in process_responses()
2421 q->credits = 0; in process_responses()
2430 rx_eth(adap, q, skb, ethpad, lro); in process_responses()
2432 q->offload_pkts++; in process_responses()
2436 ngathered = rx_offload(&adap->tdev, q, skb, in process_responses()
2442 clear_rspq_bufstate(q); in process_responses()
2447 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); in process_responses()
2516 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2521 struct sge_rspq *q = &qs->rspq; in process_pure_responses() local
2528 if (unlikely(++q->cidx == q->size)) { in process_pure_responses()
2529 q->cidx = 0; in process_pure_responses()
2530 q->gen ^= 1; in process_pure_responses()
2531 r = q->desc; in process_pure_responses()
2540 q->pure_rsps++; in process_pure_responses()
2541 if (++q->credits >= (q->size / 4)) { in process_pure_responses()
2542 refill_rspq(adap, q, q->credits); in process_pure_responses()
2543 q->credits = 0; in process_pure_responses()
2545 if (!is_new_response(r, q)) in process_pure_responses()
2557 return is_new_response(r, q); in process_pure_responses()
2563 * @q: the response queue
2566 * new SGE responses. If there are no new responses it returns -1. If
2570 * signaling responses it schedules the NAPI handler. Returns 1 if it
2575 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) in handle_responses() argument
2577 struct sge_qset *qs = rspq_to_qset(q); in handle_responses()
2578 struct rsp_desc *r = &q->desc[q->cidx]; in handle_responses()
2580 if (!is_new_response(r, q)) in handle_responses()
2581 return -1; in handle_responses()
2584 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in handle_responses()
2585 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); in handle_responses()
2589 return 1; in handle_responses()
2600 struct sge_rspq *q = &qs->rspq; in t3_sge_intr_msix() local
2602 spin_lock(&q->lock); in t3_sge_intr_msix()
2603 if (process_responses(adap, qs, -1) == 0) in t3_sge_intr_msix()
2604 q->unhandled_irqs++; in t3_sge_intr_msix()
2605 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in t3_sge_intr_msix()
2606 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); in t3_sge_intr_msix()
2607 spin_unlock(&q->lock); in t3_sge_intr_msix()
2618 struct sge_rspq *q = &qs->rspq; in t3_sge_intr_msix_napi() local
2620 spin_lock(&q->lock); in t3_sge_intr_msix_napi()
2622 if (handle_responses(qs->adap, q) < 0) in t3_sge_intr_msix_napi()
2623 q->unhandled_irqs++; in t3_sge_intr_msix_napi()
2624 spin_unlock(&q->lock); in t3_sge_intr_msix_napi()
2638 struct sge_rspq *q = &adap->sge.qs[0].rspq; in t3_intr_msi() local
2640 spin_lock(&q->lock); in t3_intr_msi()
2642 if (process_responses(adap, &adap->sge.qs[0], -1)) { in t3_intr_msi()
2643 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in t3_intr_msi()
2644 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); in t3_intr_msi()
2645 new_packets = 1; in t3_intr_msi()
2649 process_responses(adap, &adap->sge.qs[1], -1)) { in t3_intr_msi()
2650 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; in t3_intr_msi()
2655 new_packets = 1; in t3_intr_msi()
2659 q->unhandled_irqs++; in t3_intr_msi()
2661 spin_unlock(&q->lock); in t3_intr_msi()
2667 struct sge_rspq *q = &qs->rspq; in rspq_check_napi() local
2669 return is_new_response(&q->desc[q->cidx], q) && napi_schedule(&qs->napi); in rspq_check_napi()
2683 struct sge_rspq *q = &adap->sge.qs[0].rspq; in t3_intr_msi_napi() local
2685 spin_lock(&q->lock); in t3_intr_msi_napi()
2689 new_packets += rspq_check_napi(&adap->sge.qs[1]); in t3_intr_msi_napi()
2691 q->unhandled_irqs++; in t3_intr_msi_napi()
2693 spin_unlock(&q->lock); in t3_intr_msi_napi()
2705 work = process_responses(adap, rspq_to_qset(rq), -1); in process_responses_gts()
2722 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; in t3_intr()
2772 if (likely(map & 1)) in t3b_intr()
2776 process_responses_gts(adap, &adap->sge.qs[1].rspq); in t3b_intr()
2807 if (likely(map & 1)) in t3b_intr_napi()
2811 napi_schedule(&adap->sge.qs[1].napi); in t3b_intr_napi()
2967 if (status & (1 << qs->rspq.cntxt_id)) { in sge_timer_rx()
2971 refill_rspq(adap, &qs->rspq, 1); in sge_timer_rx()
2974 1 << qs->rspq.cntxt_id); in sge_timer_rx()
2981 if (qs->fl[1].credits < qs->fl[1].size) in sge_timer_rx()
2982 __refill_fl(adap, &qs->fl[1]); in sge_timer_rx()
3000 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */ in t3_update_qset_coalesce()
3027 struct sge_qset *q = &adapter->sge.qs[id]; in t3_sge_alloc_qset() local
3029 init_qset_cntxt(q, id); in t3_sge_alloc_qset()
3030 timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0); in t3_sge_alloc_qset()
3031 timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0); in t3_sge_alloc_qset()
3033 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size, in t3_sge_alloc_qset()
3036 &q->fl[0].phys_addr, &q->fl[0].sdesc); in t3_sge_alloc_qset()
3037 if (!q->fl[0].desc) in t3_sge_alloc_qset()
3040 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size, in t3_sge_alloc_qset()
3043 &q->fl[1].phys_addr, &q->fl[1].sdesc); in t3_sge_alloc_qset()
3044 if (!q->fl[1].desc) in t3_sge_alloc_qset()
3047 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size, in t3_sge_alloc_qset()
3049 &q->rspq.phys_addr, NULL); in t3_sge_alloc_qset()
3050 if (!q->rspq.desc) in t3_sge_alloc_qset()
3060 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i], in t3_sge_alloc_qset()
3062 &q->txq[i].phys_addr, in t3_sge_alloc_qset()
3063 &q->txq[i].sdesc); in t3_sge_alloc_qset()
3064 if (!q->txq[i].desc) in t3_sge_alloc_qset()
3067 q->txq[i].gen = 1; in t3_sge_alloc_qset()
3068 q->txq[i].size = p->txq_size[i]; in t3_sge_alloc_qset()
3069 spin_lock_init(&q->txq[i].lock); in t3_sge_alloc_qset()
3070 skb_queue_head_init(&q->txq[i].sendq); in t3_sge_alloc_qset()
3073 INIT_WORK(&q->txq[TXQ_OFLD].qresume_task, restart_offloadq); in t3_sge_alloc_qset()
3074 INIT_WORK(&q->txq[TXQ_CTRL].qresume_task, restart_ctrlq); in t3_sge_alloc_qset()
3076 q->fl[0].gen = q->fl[1].gen = 1; in t3_sge_alloc_qset()
3077 q->fl[0].size = p->fl_size; in t3_sge_alloc_qset()
3078 q->fl[1].size = p->jumbo_size; in t3_sge_alloc_qset()
3080 q->rspq.gen = 1; in t3_sge_alloc_qset()
3081 q->rspq.size = p->rspq_size; in t3_sge_alloc_qset()
3082 spin_lock_init(&q->rspq.lock); in t3_sge_alloc_qset()
3083 skb_queue_head_init(&q->rspq.rx_queue); in t3_sge_alloc_qset()
3085 q->txq[TXQ_ETH].stop_thres = nports * in t3_sge_alloc_qset()
3086 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3); in t3_sge_alloc_qset()
3089 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE; in t3_sge_alloc_qset()
3091 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data); in t3_sge_alloc_qset()
3094 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE; in t3_sge_alloc_qset()
3096 q->fl[1].buf_size = is_offload(adapter) ? in t3_sge_alloc_qset()
3101 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0; in t3_sge_alloc_qset()
3102 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0; in t3_sge_alloc_qset()
3103 q->fl[0].order = FL0_PG_ORDER; in t3_sge_alloc_qset()
3104 q->fl[1].order = FL1_PG_ORDER; in t3_sge_alloc_qset()
3105 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE; in t3_sge_alloc_qset()
3106 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE; in t3_sge_alloc_qset()
3111 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, in t3_sge_alloc_qset()
3112 q->rspq.phys_addr, q->rspq.size, in t3_sge_alloc_qset()
3113 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0); in t3_sge_alloc_qset()
3118 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, in t3_sge_alloc_qset()
3119 q->fl[i].phys_addr, q->fl[i].size, in t3_sge_alloc_qset()
3120 q->fl[i].buf_size - SGE_PG_RSVD, in t3_sge_alloc_qset()
3121 p->cong_thres, 1, 0); in t3_sge_alloc_qset()
3126 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS, in t3_sge_alloc_qset()
3127 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr, in t3_sge_alloc_qset()
3128 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token, in t3_sge_alloc_qset()
3129 1, 0); in t3_sge_alloc_qset()
3133 if (ntxq > 1) { in t3_sge_alloc_qset()
3134 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id, in t3_sge_alloc_qset()
3136 q->txq[TXQ_OFLD].phys_addr, in t3_sge_alloc_qset()
3137 q->txq[TXQ_OFLD].size, 0, 1, 0); in t3_sge_alloc_qset()
3143 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0, in t3_sge_alloc_qset()
3145 q->txq[TXQ_CTRL].phys_addr, in t3_sge_alloc_qset()
3146 q->txq[TXQ_CTRL].size, in t3_sge_alloc_qset()
3147 q->txq[TXQ_CTRL].token, 1, 0); in t3_sge_alloc_qset()
3154 q->adap = adapter; in t3_sge_alloc_qset()
3155 q->netdev = dev; in t3_sge_alloc_qset()
3156 q->tx_q = netdevq; in t3_sge_alloc_qset()
3157 t3_update_qset_coalesce(q, p); in t3_sge_alloc_qset()
3159 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, in t3_sge_alloc_qset()
3166 if (avail < q->fl[0].size) in t3_sge_alloc_qset()
3170 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, in t3_sge_alloc_qset()
3172 if (avail < q->fl[1].size) in t3_sge_alloc_qset()
3173 CH_WARN(adapter, "free list queue 1 enabled with %d credits\n", in t3_sge_alloc_qset()
3175 refill_rspq(adapter, &q->rspq, q->rspq.size - 1); in t3_sge_alloc_qset()
3177 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | in t3_sge_alloc_qset()
3178 V_NEWTIMER(q->rspq.holdoff_tmr)); in t3_sge_alloc_qset()
3185 t3_free_qset(adapter, q); in t3_sge_alloc_qset()
3200 struct sge_qset *q = &adap->sge.qs[i]; in t3_start_sge_timers() local
3202 if (q->tx_reclaim_timer.function) in t3_start_sge_timers()
3203 mod_timer(&q->tx_reclaim_timer, in t3_start_sge_timers()
3206 if (q->rx_reclaim_timer.function) in t3_start_sge_timers()
3207 mod_timer(&q->rx_reclaim_timer, in t3_start_sge_timers()
3223 struct sge_qset *q = &adap->sge.qs[i]; in t3_stop_sge_timers() local
3225 if (q->tx_reclaim_timer.function) in t3_stop_sge_timers()
3226 del_timer_sync(&q->tx_reclaim_timer); in t3_stop_sge_timers()
3227 if (q->rx_reclaim_timer.function) in t3_stop_sge_timers()
3228 del_timer_sync(&q->rx_reclaim_timer); in t3_stop_sge_timers()
3316 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING; in t3_sge_init()
3317 #if SGE_NUM_GENBITS == 1 in t3_sge_init()
3356 struct qset_params *q = p->qset + i; in t3_sge_prep() local
3358 q->polling = adap->params.rev > 0; in t3_sge_prep()
3359 q->coalesce_usecs = 5; in t3_sge_prep()
3360 q->rspq_size = 1024; in t3_sge_prep()
3361 q->fl_size = 1024; in t3_sge_prep()
3362 q->jumbo_size = 512; in t3_sge_prep()
3363 q->txq_size[TXQ_ETH] = 1024; in t3_sge_prep()
3364 q->txq_size[TXQ_OFLD] = 1024; in t3_sge_prep()
3365 q->txq_size[TXQ_CTRL] = 256; in t3_sge_prep()
3366 q->cong_thres = 0; in t3_sge_prep()