// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 */
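/*
 * Excerpt below: find_prev_entry() walks the responder's ack queue
 * backwards from r_head_ack_queue, looking for the entry whose PSN range
 * contains @psn; the __must_hold() annotation documents that the caller
 * owns the QP's s_lock for the whole walk.
 */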
	__must_hold(&qp->s_lock)
{
	/* ... */
	for (i = qp->r_head_ack_queue; ; i = p) {
		if (i == qp->s_tail_ack_queue)
			s = false;
		if (i)
			p = i - 1;
		else
			p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
		if (p == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[p];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (cmp_psn(psn, e->psn) >= 0) {
			if (p == qp->s_tail_ack_queue &&
			    cmp_psn(psn, e->lpsn) <= 0)
				s = true;
			break;
		}
	}
	/* ... */
}
/**
 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * ...
 */
static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
		       struct ib_other_headers *ohdr,
		       struct hfi1_pkt_state *ps)
{
	u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
	u32 pmtu = qp->pmtu;
	struct hfi1_qp_priv *qpriv = qp->priv;
	u8 next = qp->s_tail_ack_queue;
	/* ... */

	lockdep_assert_held(&qp->s_lock);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	if (qpriv->hdr_type == HFI1_PKT_TYPE_9B)
		/* header size in 32-bit words LRH+BTH = (8+12)/4. */
		hwords = 5;
	else
		/* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
		hwords = 7;
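	/*
	 * hwords counts the header in 32-bit words: 9B carries an 8-byte
	 * LRH plus a 12-byte BTH ((8 + 12) / 4 = 5 words), while 16B has a
	 * 16-byte LRH ((16 + 12) / 4 = 7 words). It grows below as an
	 * AETH, RETH or atomic ACK ETH is appended.
	 */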
	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		release_rdma_sge_mr(e);
		/* FALLTHROUGH */
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being constructed.
		 */
		if (++next > rvt_size_atomic(&dev->rdi))
			next = 0;
		/*
		 * Only advance the s_acked_ack_queue pointer if there
		 * have been no TID RDMA requests.
		 */
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode != TID_OP(WRITE_REQ) &&
		    qp->s_acked_ack_queue == qp->s_tail_ack_queue)
			qp->s_acked_ack_queue = next;
		qp->s_tail_ack_queue = next;
		trace_hfi1_rsp_make_rc_ack(qp, e->psn);
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		/* Check for TID RDMA ack interlock */
		if ((qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK) ||
		    hfi1_tid_rdma_ack_interlock(qp, e)) {
			iowait_set_flag(&qpriv->s_iowait, IOWAIT_PENDING_IB);
			goto bail;
		}
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If a RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester re-sends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				if (qp->s_acked_ack_queue ==
				    qp->s_tail_ack_queue)
					qp->s_acked_ack_queue =
						qp->r_head_ack_queue;
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			ps->s_txreq->mr = e->rdma_sge.mr;
			if (ps->s_txreq->mr)
				rvt_get_mr(ps->s_txreq->mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			ps->s_txreq->ss = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = mask_psn(qp->s_ack_rdma_psn++);
		} else if (e->opcode == TID_OP(WRITE_REQ)) {
			/*
			 * If a TID RDMA WRITE RESP is being resent, wait for
			 * the duplicate request first; resent requests have
			 * their state set to TID_REQUEST_RESEND.
			 */
			req = ack_to_tid_req(e);
			if (req->state == TID_REQUEST_RESEND ||
			    req->state == TID_REQUEST_INIT_RESEND)
				goto bail;
			qp->s_ack_state = TID_OP(WRITE_RESP);
			qp->s_ack_rdma_psn = mask_psn(e->psn + req->cur_seg);
			/* ... */
		} else if (e->opcode == TID_OP(READ_REQ)) {
			/*
			 * If a TID RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester re-sends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				if (qp->s_acked_ack_queue ==
				    qp->s_tail_ack_queue)
					qp->s_acked_ack_queue =
						qp->r_head_ack_queue;
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			ps->s_txreq->mr = e->rdma_sge.mr;
			if (ps->s_txreq->mr)
				rvt_get_mr(ps->s_txreq->mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			qp->s_ack_state = TID_OP(READ_RESP);
			/* ... */
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			ps->s_txreq->ss = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = rvt_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = mask_psn(e->psn);
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		ps->s_txreq->ss = &qp->s_ack_rdma_sge;
		ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
		if (ps->s_txreq->mr)
			rvt_get_mr(ps->s_txreq->mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
		} else {
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = mask_psn(qp->s_ack_rdma_psn++);
		break;
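	/*
	 * Note on the read-response cases above: each response packet
	 * consumes one PSN (s_ack_rdma_psn++); once the remaining length
	 * fits in a single MTU, the state advances to
	 * RDMA_READ_RESPONSE_LAST and an AETH is appended so the final
	 * packet also carries the acknowledgement.
	 */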
	case TID_OP(WRITE_RESP):
		/*
		 * ...
		 * 5.3 If more resources needed, do 2.1 - 2.3.
		 * ...
		 */
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		req = ack_to_tid_req(e);
		/* Send scheduled RNR NAKs at segment boundaries */
		if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND &&
		    qp->s_tail_ack_queue == qpriv->r_tid_alloc &&
		    req->cur_seg == req->alloc_seg) {
			qpriv->rnr_nak_state = TID_RNR_NAK_SENT;
			goto normal_no_state;
		}
		bth2 = mask_psn(qp->s_ack_rdma_psn);
		hdwords = hfi1_build_tid_rdma_write_resp(qp, e, ohdr, &bth1,
							 bth2, &len,
							 &ps->s_txreq->ss);
		if (!hdwords)
			goto bail;
		hwords += hdwords;
		bth0 = qp->s_ack_state << 24;
		qp->s_ack_rdma_psn++;
		trace_hfi1_tid_req_make_rc_ack_write(qp, 0, e->opcode, e->psn,
						     e->lpsn, req);
		if (req->cur_seg != req->total_segs)
			break;

		/* Last segment TID RDMA WRITE RESP: no need to wait for ACKs */
		e->sent = 1;
		/* Do not free e->rdma_sge until all data are received */
		qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
		break;

	case TID_OP(READ_RESP):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		ps->s_txreq->ss = &qp->s_ack_rdma_sge;
		/* ... */
		if (last_pkt) {
			e->sent = 1;
			/*
			 * Increment qp->s_tail_ack_queue through s_ack_state
			 * transition.
			 */
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
		}
		break;
	default:
normal:
		/*
		 * Send a regular ACK. Set the s_ack_state so we wait until
		 * after sending the ACK before setting s_ack_state to
		 * ACKNOWLEDGE (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
normal_no_state:
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					    (qp->s_nak_state <<
					     IB_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = rvt_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = mask_psn(qp->s_ack_psn);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
		ps->s_txreq->ss = NULL;
	}
	qp->s_rdma_ack_cnt++;
	ps->s_txreq->sde = qpriv->s_sde;
	ps->s_txreq->s_cur_size = len;
	ps->s_txreq->hdr_dwords = hwords;
	hfi1_make_ruc_header(qp, ohdr, bth0, bth1, bth2, middle, ps);
	return 1;

error_qp:
	spin_unlock_irqrestore(&qp->s_lock, ps->flags);
	spin_lock_irqsave(&qp->r_lock, ps->flags);
	spin_lock(&qp->s_lock);
	rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, ps->flags);
	spin_lock_irqsave(&qp->s_lock, ps->flags);
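	/*
	 * Lock ordering note: r_lock must be taken before s_lock, which is
	 * why s_lock is dropped and reacquired inside r_lock above instead
	 * of taking r_lock directly.
	 */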
bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	/* ... */
	qp->s_flags &= ~(RVT_S_RESP_PENDING
				| RVT_S_ACK_PENDING
				| HFI1_S_AHG_VALID);
	return 0;
}
/**
 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * ...
 * Assumes s_lock is held.
 */
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	u32 hwords = 5;
	u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
	u32 pmtu = qp->pmtu;
	/* ... */

	lockdep_assert_held(&qp->s_lock);
	ps->s_txreq = get_txreq(ps->dev, qp);
	if (!ps->s_txreq)
		goto bail_no_tx;

	if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
		/* header size in 32-bit words LRH+BTH = (8+12)/4. */
		hwords = 5;
		if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
			ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
		else
			ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
	} else {
		/* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
		hwords = 7;
		if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
		    (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
			ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
		else
			ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
	}

	/* Sending responses has higher priority over sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    make_rc_ack(dev, qp, ohdr, ps))
		return 1;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
					 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done_free_tx;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK | HFI1_S_WAIT_HALT))
		goto bail;

	if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}
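	/*
	 * Note: setting s_sending_hpsn to s_psn - 1 leaves the in-flight
	 * window empty; nothing between s_sending_psn and s_sending_hpsn
	 * remains outstanding.
	 */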
	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
check_s_state:
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == READ_ONCE(qp->s_head)) {
				clear_ahg(qp);
				goto bail;
			}
			/*
			 * If a fence is requested, wait for previous RDMA
			 * read and atomic operations to finish. There is no
			 * need to guard against TID RDMA READ after TID RDMA
			 * READ.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic &&
			    (wqe->wr.opcode != IB_WR_TID_RDMA_READ ||
			     priv->pending_tid_r_segs < qp->s_num_rd_atomic)) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			/*
			 * Local operations are processed immediately after
			 * all prior requests have completed.
			 */
			if (wqe->wr.opcode == IB_WR_REG_MR ||
			    wqe->wr.opcode == IB_WR_LOCAL_INV) {
				int local_ops = 0;
				int err = 0;

				if (qp->s_last != qp->s_cur)
					goto bail;
				if (++qp->s_cur == qp->s_size)
					qp->s_cur = 0;
				if (++qp->s_tail == qp->s_size)
					qp->s_tail = 0;
				if (!(wqe->wr.send_flags &
				      RVT_SEND_COMPLETION_ONLY)) {
					err = rvt_invalidate_rkey(
						qp,
						wqe->wr.ex.invalidate_rkey);
					local_ops = 1;
				}
				rvt_send_complete(qp, wqe,
						  err ? IB_WC_LOC_PROT_ERR
						      : IB_WC_SUCCESS);
				if (local_ops)
					atomic_dec(&qp->local_ops_pending);
				goto done_free_tx;
			}

			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = mask_psn(qp->s_psn);

		/*
		 * Interlock between various IB requests and TID RDMA
		 * if necessary.
		 */
		if ((priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) ||
		    hfi1_tid_rdma_wqe_interlock(qp, wqe))
			goto bail;

		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
		case IB_WR_SEND_WITH_INV:
			/* ... (stop here if no credit is available) */
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND) {
				qp->s_state = OP(SEND_ONLY);
			} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			} else {
				qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
				/* Invalidate rkey comes after the BTH */
				ohdr->u.ieth = cpu_to_be32(
						wqe->wr.ex.invalidate_rkey);
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			goto no_flow_control;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* ... (stop here if no credit is available) */
no_flow_control:
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
				qp->s_state = OP(RDMA_WRITE_ONLY);
			} else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_TID_RDMA_WRITE:
			if (newreq) {
				/* Limit the number of TID RDMA WRITE requests */
				if (atomic_read(&priv->n_tid_requests) >=
				    HFI1_TID_RDMA_WRITE_CNT)
					goto bail;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			/* ... */
			ss = NULL;
			if (priv->s_tid_cur == HFI1_QP_WQE_INVALID) {
				priv->s_tid_cur = qp->s_cur;
				if (priv->s_tid_tail == HFI1_QP_WQE_INVALID) {
					priv->s_tid_tail = qp->s_cur;
					priv->s_state = TID_OP(WRITE_RESP);
				}
			} else if (priv->s_tid_cur == priv->s_tid_head) {
				struct rvt_swqe *__w;
				struct tid_rdma_request *__r;

				__w = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
				__r = wqe_to_tid_req(__w);

				/*
				 * Advance s_tid_cur to s_cur if the WQE that
				 * s_tid_cur points to satisfies any of:
				 *   1. it is not a TID RDMA WRITE request;
				 *   2. it is in the INACTIVE or COMPLETE
				 *      state;
				 *   3. it is in the ACTIVE or SYNC state and
				 *      the number of completed segments
				 *      equals the total segment count.
				 */
				if (__w->wr.opcode != IB_WR_TID_RDMA_WRITE ||
				    __r->state == TID_REQUEST_INACTIVE ||
				    __r->state == TID_REQUEST_COMPLETE ||
				    ((__r->state == TID_REQUEST_ACTIVE ||
				      __r->state == TID_REQUEST_SYNC) &&
				     __r->comp_seg == __r->total_segs)) {
					if (priv->s_tid_tail ==
					    priv->s_tid_cur &&
					    priv->s_state ==
					    TID_OP(WRITE_DATA_LAST)) {
						priv->s_tid_tail = qp->s_cur;
						priv->s_state =
							TID_OP(WRITE_RESP);
					}
					priv->s_tid_cur = qp->s_cur;
				}
				/*
				 * A corner case: when the last TID RDMA WRITE
				 * request was completed, s_tid_head,
				 * s_tid_cur, and s_tid_tail all point to the
				 * same location, and s_cur can wrap around to
				 * that same location. In this case, none of
				 * the indices need to be
				 * updated. However, the priv->s_state should.
				 */
				if (priv->s_tid_tail == qp->s_cur &&
				    priv->s_state == TID_OP(WRITE_DATA_LAST))
					priv->s_state = TID_OP(WRITE_RESP);
			}
			req = wqe_to_tid_req(wqe);
			if (newreq) {
				priv->s_tid_head = qp->s_cur;
				priv->pending_tid_w_resp += req->total_segs;
				atomic_inc(&priv->n_tid_requests);
				atomic_dec(&priv->n_requests);
			} else {
				req->state = TID_REQUEST_RESEND;
				req->comp_seg = delta_psn(bth2, wqe->psn);
				/*
				 * Pull back any segments since we are going
				 * to re-receive them.
				 */
				req->setup_head = req->clear_tail;
				priv->pending_tid_w_resp +=
					delta_psn(wqe->lpsn, bth2) + 1;
			}
			trace_hfi1_tid_req_make_req_write(qp, newreq,
							  wqe->wr.opcode,
							  wqe->psn, wqe->lpsn,
							  req);
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (qp->s_num_rd_atomic >=
			    qp->s_max_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_RDMAR;
				goto bail;
			}
			qp->s_num_rd_atomic++;
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_TID_RDMA_READ:
			wpriv = wqe->priv;
			req = wqe_to_tid_req(wqe);
			trace_hfi1_tid_req_make_req_read(qp, newreq,
							 wqe->wr.opcode,
							 wqe->psn, wqe->lpsn,
							 req);
			delta = cmp_psn(qp->s_psn, wqe->psn);

			/*
			 * Possibilities: (1) this is a new request; (2) the
			 * request was just re-queued,
			 * but the qp->s_state is set to OP(RDMA_READ_REQUEST)
			 * because a retry was
			 * received just before this; (3) We are re-sending a
			 * request.
			 */
			if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_RDMAR;
				goto bail;
			}
			if (newreq) {
				struct tid_rdma_flow *flow =
					&req->flows[req->setup_head];

				/*
				 * Set up s_sge as it is needed for TID
				 * allocation; skip it if the pages have
				 * already been walked and mapped.
				 */
				if (!flow->npagesets) {
					qp->s_sge.sge = wqe->sg_list[0];
					qp->s_sge.sg_list = wqe->sg_list + 1;
					qp->s_sge.num_sge = wqe->wr.num_sge;
					qp->s_sge.total_len = wqe->length;
					qp->s_len = wqe->length;
					req->isge = 0;
					req->clear_tail = req->setup_head;
					req->flow_idx = req->setup_head;
					req->state = TID_REQUEST_ACTIVE;
				}
			} else if (delta == 0) {
				/* Re-send a request */
				req->cur_seg = 0;
				req->comp_seg = 0;
				req->ack_pending = 0;
				req->flow_idx = req->clear_tail;
				req->state = TID_REQUEST_RESEND;
			}
			req->s_next_psn = qp->s_psn;
			/* Read one segment at a time */
			len = min_t(u32, req->seg_len,
				    wqe->length - req->seg_len * req->cur_seg);
			/* ... */
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			ss = &wpriv->ss;
			/* Check if this is the last segment */
			if (req->cur_seg >= req->total_segs &&
			    ++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
		case IB_WR_OPFN:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (qp->s_num_rd_atomic >=
			    qp->s_max_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_RDMAR;
				goto bail;
			}
			qp->s_num_rd_atomic++;

			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			    wqe->wr.opcode == IB_WR_OPFN) {
				qp->s_state = OP(COMPARE_SWAP);
				put_ib_ateth_swap(wqe->atomic_wr.swap,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(wqe->atomic_wr.compare_add,
						     &ohdr->u.atomic_eth);
			} else {
				qp->s_state = OP(FETCH_ADD);
				put_ib_ateth_swap(wqe->atomic_wr.compare_add,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
			}
			put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
					   &ohdr->u.atomic_eth);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) {
			qp->s_sge.sge = wqe->sg_list[0];
			qp->s_sge.sg_list = wqe->sg_list + 1;
			qp->s_sge.num_sge = wqe->wr.num_sge;
			qp->s_sge.total_len = wqe->length;
			qp->s_len = wqe->length;
		}
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
			qp->s_psn = wqe->lpsn + 1;
		else if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
			qp->s_psn = req->s_next_psn;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND) {
			qp->s_state = OP(SEND_LAST);
		} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		} else {
			qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
			/* Invalidate rkey comes after the BTH */
			ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate a RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
			qp->s_state = OP(RDMA_WRITE_LAST);
		} else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate a RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
		put_ib_reth_vaddr(
			wqe->rdma_wr.remote_addr + len,
			&ohdr->u.rc.reth);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
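		/*
		 * IB_BTH_REQ_ACK sets the AckReq bit in BTH[2] so the
		 * responder must acknowledge this request packet.
		 */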
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;

	case TID_OP(WRITE_RESP):
		/*
		 * This value for s_state is used for restarting a TID RDMA
		 * WRITE request; see the comment in the
		 * OP(RDMA_READ_RESPONSE_MIDDLE) case above.
		 */
		req = wqe_to_tid_req(wqe);
		req->state = TID_REQUEST_RESEND;
		rcu_read_lock();
		remote = rcu_dereference(priv->tid_rdma.remote);
		req->comp_seg = delta_psn(qp->s_psn, wqe->psn);
		len = wqe->length - (req->comp_seg * remote->max_len);
		rcu_read_unlock();

		bth2 = mask_psn(qp->s_psn);
		/* ... */
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		qp->s_state = TID_OP(WRITE_REQ);
		priv->pending_tid_w_resp += delta_psn(wqe->lpsn, bth2) + 1;
		priv->s_tid_cur = qp->s_cur;
		if (++qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		trace_hfi1_tid_req_make_req_write(qp, 0, wqe->wr.opcode,
						  wqe->psn, wqe->lpsn, req);
		break;

	case TID_OP(READ_RESP):
		if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
			goto bail;
		/* This is used to restart a TID read request */
		req = wqe_to_tid_req(wqe);
		wpriv = wqe->priv;
		/*
		 * Back down. The field qp->s_psn has been set to the psn with
		 * which the request should be restarted. It's OK to use
		 * division as this is on the retry path.
		 */
		req->cur_seg = delta_psn(qp->s_psn, wqe->psn) / priv->pkts_ps;
		/*
		 * The restart call below needs to report its status back;
		 * at the same time, we can use the req->state change to
		 * check if the call succeeds or not.
		 */
		req->state = TID_REQUEST_RESEND;
		hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
		if (req->state != TID_REQUEST_ACTIVE) {
			/*
			 * Failed to find the flow. Release all allocated TID
			 * resources.
			 */
			hfi1_kern_exp_rcv_clear_all(req);
			hfi1_kern_clear_hw_flow(priv->rcd, qp);

			hfi1_trdma_send_complete(qp, wqe, IB_WC_LOC_QP_OP_ERR);
			goto bail;
		}
		req->state = TID_REQUEST_RESEND;
		len = min_t(u32, req->seg_len,
			    wqe->length - req->seg_len * req->cur_seg);
		flow = &req->flows[req->flow_idx];
		len -= flow->sent;
		req->s_next_psn = flow->flow_state.ib_lpsn + 1;
		/* ... */
		ss = &wpriv->ss;
		/* Check if this is the last segment */
		if (req->cur_seg >= req->total_segs &&
		    ++qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		qp->s_psn = req->s_next_psn;
		trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
						 wqe->psn, wqe->lpsn, req);
		break;

	case TID_OP(READ_REQ):
		req = wqe_to_tid_req(wqe);
		delta = cmp_psn(qp->s_psn, wqe->psn);
		/*
		 * If the current WQE is the last request for this opcode or
		 * the start of a new request, we need to change the
		 * qp->s_state so that a request can be built.
		 */
		if (wqe->wr.opcode != IB_WR_TID_RDMA_READ || delta == 0 ||
		    qp->s_cur == qp->s_tail) {
			qp->s_state = OP(RDMA_READ_REQUEST);
			if (delta == 0 || qp->s_cur == qp->s_tail)
				goto check_s_state;
			else
				goto bail;
		}

		/* Rate limiting */
		if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
			qp->s_flags |= RVT_S_WAIT_RDMAR;
			goto bail;
		}

		wpriv = wqe->priv;
		/* Read one segment at a time */
		len = min_t(u32, req->seg_len,
			    wqe->length - req->seg_len * req->cur_seg);
		/* ... */
		ss = &wpriv->ss;
		/* Check if this is the last segment */
		if (req->cur_seg >= req->total_segs &&
		    ++qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		qp->s_psn = req->s_next_psn;
		trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
						 wqe->psn, wqe->lpsn, req);
		break;
	}
	qp->s_sending_hpsn = bth2;
	delta = delta_psn(bth2, wqe->psn);
	if (delta && delta % HFI1_PSN_CREDIT == 0 &&
	    wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	ps->s_txreq->hdr_dwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->ss = ss;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(
		qp,
		ohdr,
		bth0 | (qp->s_state << 24),
		bth1,
		bth2,
		middle,
		ps);
	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);
bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	/*
	 * If we didn't get a txreq, the QP will be woken up later to try
	 * again. Set the flags to indicate which work item to wake up.
	 */
	iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
	return 0;
}
static inline void hfi1_make_bth_aeth(struct rvt_qp *qp,
				      struct ib_other_headers *ohdr,
				      u32 bth0, u32 bth1)
{
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					   (qp->r_nak_state <<
					    IB_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = rvt_compute_aeth(qp);

	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(bth1 | qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
}
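/*
 * As built above, the AETH packs the 24-bit MSN in its low bits and the
 * NAK state / credit syndrome in the byte above them
 * (IB_AETH_CREDIT_SHIFT); BTH[2] carries the 24-bit PSN being acked.
 */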
static inline void hfi1_queue_rc_ack(struct hfi1_packet *packet, bool is_fecn)
{
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto unlock;
	ibp = rcd_to_iport(packet->rcd);
	this_cpu_inc(*ibp->rvp.rc_qacks);
	qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	if (is_fecn)
		qp->s_flags |= RVT_S_ECN;

	/* Schedule the send engine. */
	hfi1_schedule_send(qp);
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
static inline void hfi1_make_rc_ack_9B(struct hfi1_packet *packet,
				       struct hfi1_opa_header *opa_hdr,
				       u8 sc5, bool is_fecn,
				       u64 *pbc_flags, u32 *hwords,
				       u32 *nwords)
{
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct ib_header *hdr = &opa_hdr->ibh;
	struct ib_other_headers *ohdr;
	u16 lrh0 = HFI1_LRH_BTH;
	/* ... */

	opa_hdr->hdr_type = HFI1_PKT_TYPE_9B;
	ohdr = &hdr->u.oth;
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
	*hwords = 6;

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
		*hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
					 rdma_ah_read_grh(&qp->remote_ah_attr),
					 *hwords - 2, SIZE_OF_CRC);
		ohdr = &hdr->u.l.oth;
		lrh0 = HFI1_LRH_GRH;
	}
	/* ... */
	pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	lrh0 |= (sc5 & IB_SC_MASK) << IB_SC_SHIFT |
		(rdma_ah_get_sl(&qp->remote_ah_attr) & IB_SL_MASK) <<
		 IB_SL_SHIFT;
	hfi1_make_ib_hdr(hdr, lrh0, *hwords + SIZE_OF_CRC,
			 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
			 ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr));

	bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	bth1 = (!!is_fecn) << IB_BECN_SHIFT;
	hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
}
static inline void hfi1_make_rc_ack_16B(struct hfi1_packet *packet,
					struct hfi1_opa_header *opa_hdr,
					u8 sc5, bool is_fecn,
					u64 *pbc_flags, u32 *hwords,
					u32 *nwords)
{
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_16b_header *hdr = &opa_hdr->opah;
	struct ib_other_headers *ohdr;
	u8 l4 = OPA_16B_L4_IB_LOCAL;
	/* ... */

	opa_hdr->hdr_type = HFI1_PKT_TYPE_16B;
	ohdr = &hdr->u.oth;
	/* header size in 32-bit words 16B LRH+BTH+AETH = (16+12+4)/4 */
	*hwords = 8;
	/* ... */

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
		*hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
					 rdma_ah_read_grh(&qp->remote_ah_attr),
					 *hwords - 4, *nwords);
		ohdr = &hdr->u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
	}
	/* ... */
	pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	/* ... */
	hfi1_make_16b_hdr(hdr, ppd->lid |
			  (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
			   ((1 << ppd->lmc) - 1)),
			  opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
				      16B),
			  /* ... */);

	bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
}

/* We support only two types - 9B and 16B for now */
static const hfi1_make_rc_ack hfi1_make_rc_ack_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_rc_ack_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B
};
/**
 * hfi1_send_rc_ack - Construct an ACK packet and send it
 * ...
 */
void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	u64 pbc, pbc_flags = 0;
	u32 hwords = 0;
	u32 nwords = 0;
	u32 plen;
	struct pio_buf *pbuf;
	struct hfi1_opa_header opa_hdr;

	/* clear the defer count */
	qp->r_adefered = 0;

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if (qp->s_flags & RVT_S_RESP_PENDING) {
		hfi1_queue_rc_ack(packet, is_fecn);
		return;
	}

	/* Ensure s_rdma_ack_cnt changes are committed */
	if (qp->s_rdma_ack_cnt) {
		hfi1_queue_rc_ack(packet, is_fecn);
		return;
	}
	/* ... */

	/* Make the appropriate header */
	hfi1_make_rc_ack_tbl[priv->hdr_type](packet, &opa_hdr, sc5, is_fecn,
					     &pbc_flags, &hwords, &nwords);

	plen = 2 /* PBC */ + hwords + nwords;
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
			 sc_to_vlt(ppd->dd, sc5), plen);
	pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
	if (IS_ERR_OR_NULL(pbuf)) {
		/*
		 * No room to send at the moment: pass responsibility for
		 * sending the ACK to the send engine.
		 */
		hfi1_queue_rc_ack(packet, is_fecn);
		return;
	}
	trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
			       &opa_hdr, ib_is_sc5(sc5));

	/* write the pbc and data */
	ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
				 (priv->hdr_type == HFI1_PKT_TYPE_9B ?
				  (void *)&opa_hdr.ibh :
				  (void *)&opa_hdr.opah), hwords);
}
/**
 * update_num_rd_atomic - update the qp->s_num_rd_atomic
 * ...
 *
 * This is called from reset_psn() to update qp->s_num_rd_atomic
 * for the current wqe.
 * Called at interrupt level with the QP s_lock held.
 */
static void update_num_rd_atomic(struct rvt_qp *qp, u32 psn,
				 struct rvt_swqe *wqe)
{
	u32 opcode = wqe->wr.opcode;

	if (opcode == IB_WR_RDMA_READ ||
	    opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
	    opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
		qp->s_num_rd_atomic++;
	} else if (opcode == IB_WR_TID_RDMA_READ) {
		struct tid_rdma_request *req = wqe_to_tid_req(wqe);
		struct hfi1_qp_priv *priv = qp->priv;

		if (cmp_psn(psn, wqe->lpsn) <= 0) {
			u32 cur_seg;

			cur_seg = (psn - wqe->psn) / priv->pkts_ps;
			req->ack_pending = cur_seg - req->comp_seg;
			priv->pending_tid_r_segs += req->ack_pending;
			qp->s_num_rd_atomic += req->ack_pending;
			trace_hfi1_tid_req_update_num_rd_atomic(qp, 0,
								wqe->wr.opcode,
								wqe->psn,
								wqe->lpsn,
								req);
		} else {
			priv->pending_tid_r_segs += req->total_segs;
			qp->s_num_rd_atomic += req->total_segs;
		}
	}
}
/**
 * reset_psn - reset the QP state to send starting from PSN
 * ...
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	qp->s_cur = n;
	priv->pending_tid_r_segs = 0;
	priv->pending_tid_w_resp = 0;
	qp->s_num_rd_atomic = 0;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (cmp_psn(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}
	update_num_rd_atomic(qp, psn, wqe);

	/* Find the work request opcode corresponding to the given PSN. */
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = cmp_psn(psn, wqe->psn);
		if (diff < 0) {
			/* Point wqe back to the previous one */
			wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
			break;
		}
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		update_num_rd_atomic(qp, psn, wqe);
	}
	opcode = wqe->wr.opcode;

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See hfi1_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_TID_RDMA_WRITE:
		qp->s_state = TID_OP(WRITE_RESP);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	case IB_WR_TID_RDMA_READ:
		qp->s_state = TID_OP(READ_RESP);
		break;

	default:
		/*
		 * This case shouldn't happen since its only
		 * one PSN per req.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
	 * asynchronously before the send engine can get scheduled.
	 * Doing it in hfi1_make_rc_req() is too late.
	 */
	if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
	qp->s_flags &= ~HFI1_S_AHG_VALID;
}
/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct hfi1_ibport *ibp;

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			hfi1_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			/*
			 * OPFN request WQEs need special handling as they
			 * are not allowed to generate real user packets.
			 */
			if (wqe->wr.opcode == IB_WR_OPFN) {
				struct hfi1_ibport *ibp =
					to_iport(qp->ibqp.device, qp->port_num);
				/*
				 * Call opfn_conn_reply() with capcode and
				 * remaining data as 0 to close out the
				 * current request.
				 */
				opfn_conn_reply(qp, priv->opfn.curr);
				wqe = do_rc_completion(qp, wqe, ibp);
				qp->s_flags &= ~RVT_S_WAIT_ACK;
			} else {
				if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
					struct tid_rdma_request *req;

					req = wqe_to_tid_req(wqe);
					hfi1_kern_exp_rcv_clear_all(req);
					hfi1_kern_clear_hw_flow(priv->rcd, qp);
				}

				hfi1_trdma_send_complete(qp, wqe,
							 IB_WC_RETRY_EXC_ERR);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			return;
		} else { /* need to handle delayed completion */
			return;
		}
	} else {
		qp->s_retry--;
	}

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ ||
	    wqe->wr.opcode == IB_WR_TID_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK | HFI1_S_WAIT_TID_RESP);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}
/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads or TID RDMA reads
 * are complete.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	lockdep_assert_held(&qp->s_lock);
	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (cmp_psn(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ ||
			    wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
			    wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}
/**
 * hfi1_rc_verbs_aborted - handle abort status
 * ...
 *
 * This modifies both the ACK bit in BTH[2] and the s_flags so the send
 * engine goes into "send one" mode, throttling it to a single packet in
 * the likely case that a link has gone down.
 */
void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah)
{
	/* ... */
	ohdr->bth[2] = cpu_to_be32(psn);
	qp->s_flags |= RVT_S_SEND_ONE;
}
/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct rvt_swqe *wqe;
	u32 opcode, head, tail, psn;
	struct tid_rdma_request *req;
	/* ... */

	lockdep_assert_held(&qp->s_lock);
	if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
		return;

	/* ... */
	if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	     opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
	    opcode == TID_OP(READ_RESP) ||
	    opcode == TID_OP(WRITE_RESP)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}
	/* ... */

	/* Handle TID RDMA WRITE packets differently */
	if (opcode >= TID_OP(WRITE_REQ) &&
	    opcode <= TID_OP(WRITE_DATA_LAST)) {
		head = priv->s_tid_head;
		tail = priv->s_tid_cur;
		wqe = rvt_get_swqe_ptr(qp, tail);
		req = wqe_to_tid_req(wqe);
		if (head == tail && req->comp_seg < req->total_segs) {
			if (tail == 0)
				tail = qp->s_size - 1;
			else
				tail -= 1;
		}
	} else {
		head = qp->s_tail;
		tail = qp->s_acked;
	}

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && tail != head &&
	    /* ... */
	    !(qp->s_flags &
	      (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		if (opcode == TID_OP(READ_REQ))
			rvt_add_retry_timer_ext(qp, priv->timeout_shift);
		else
			rvt_add_retry_timer(qp);
	}

	/* Start TID RDMA ACK timer */
	if ((opcode == TID_OP(WRITE_DATA) ||
	     opcode == TID_OP(WRITE_DATA_LAST) ||
	     opcode == TID_OP(ACK)) &&
	    (psn & IB_BTH_REQ_ACK) &&
	    !(priv->s_flags & HFI1_S_TID_RETRY_TIMER) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		/*
		 * The TID RDMA ACK packet could be received before this
		 * function is called. Therefore, add the timer only if TID
		 * RDMA ACK packets are actually expected.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		req = wqe_to_tid_req(wqe);
		if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
		    req->ack_seg < req->cur_seg)
			hfi1_add_tid_retry_timer(qp);
	}

	while (qp->s_last != qp->s_acked) {
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
		rvt_qp_complete_swqe(qp,
				     wqe,
				     ib_hfi1_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	}
	/*
	 * If we were waiting for sends to complete before re-sending,
	 * and they are now complete, restart sending.
	 */
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		hfi1_schedule_send(qp);
	}
}
static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}
/*
 * Generate a SWQE completion.
 * This is similar to hfi1_rc_send_complete() but has to check to make sure
 * the SWQE has not been resent.
 */
struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
				  struct rvt_swqe *wqe,
				  struct hfi1_ibport *ibp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	trace_hfi1_rc_completion(qp, wqe->lpsn);
	if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
		rvt_qp_complete_swqe(qp,
				     wqe,
				     ib_hfi1_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	} else {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
		/*
		 * If send progress is not running, attempt to progress the
		 * SDMA queue.
		 */
		if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
			struct sdma_engine *engine;
			u8 sl = rdma_ah_get_sl(&qp->remote_ah_attr);
			u8 sc5;

			/* For now use sc to find engine */
			sc5 = ibp->sl_to_sc[sl];
			engine = qp_to_sdma_engine(qp, sc5);
			sdma_engine_progress_schedule(engine);
		}
	}

	qp->s_retry = qp->s_retry_cnt;
	if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
		update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop re-sending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_draining = 0;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	if (priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) {
		priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
		hfi1_schedule_send(qp);
	}
	return wqe;
}
static void set_restart_qp(struct rvt_qp *qp, struct hfi1_ctxtdata *rcd)
{
	/* Retry this request. */
	if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
		qp->r_flags |= RVT_R_RDMAR_SEQ;
		hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
		if (list_empty(&qp->rspwait)) {
			qp->r_flags |= RVT_R_RSP_SEND;
			rvt_get_qp(qp);
			list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
		}
	}
}
/**
 * update_qp_retry_state - Update qp retry state.
 * ...
 *
 * This function is called to update the qp retry state upon
 * receiving a TID WRITE RESP after the qp is scheduled to retry
 * a request.
 */
static void update_qp_retry_state(struct rvt_qp *qp, u32 psn, u32 spsn,
				  u32 lpsn)
{
	struct hfi1_qp_priv *qpriv = qp->priv;

	qp->s_psn = psn + 1;
	/*
	 * If this is the first TID RDMA WRITE RESP packet for the current
	 * request, change the s_state so that the retry will be processed
	 * correctly. Similarly, if this is the last TID RDMA WRITE RESP
	 * packet, change the s_state and advance the s_cur.
	 */
	if (cmp_psn(psn, lpsn) >= 0) {
		qp->s_cur = qpriv->s_tid_cur + 1;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_state = TID_OP(WRITE_REQ);
	} else if (!cmp_psn(psn, spsn)) {
		qp->s_cur = qpriv->s_tid_cur;
		qp->s_state = TID_OP(WRITE_RESP);
	}
}
/**
 * do_rc_ack - process an incoming RC ACK
 * ...
 * May be called at interrupt level, with the QP s_lock held.
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	/* ... */

	lockdep_assert_held(&qp->s_lock);
	ack_psn = psn;
	if (aeth >> IB_AETH_NAK_SHIFT)
		ack_psn--;
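	/*
	 * A NAK names the first PSN that was *not* received, so for NAKs
	 * the acknowledge point is backed up by one before the send queue
	 * is scanned for requests completed by this PSN.
	 */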
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = rcd_to_iport(rcd);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since we want
		 * to generate completion events for everything before the
		 * RDMA read, copy the data, then generate the completion
		 * for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail_stop;
		}
		/*
		 * If this request is a RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic: only a RDMA_READ_LAST or ONLY can ACK a RDMA
		 * read, and likewise for atomic ops.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    (wqe->wr.opcode == IB_WR_TID_RDMA_READ &&
		     (opcode != TID_OP(READ_RESP) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0)) ||
		    (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
		     (delta_psn(psn, qp->s_last_psn) != 1))) {
			set_restart_qp(qp, rcd);
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail_stop;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (wqe->wr.opcode == IB_WR_OPFN)
			opfn_conn_reply(qp, val);

		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			}
		}

		/*
		 * TID RDMA WRITE requests will be completed by the TID RDMA
		 * ACK packet handler (see tid_rdma.c).
		 */
		if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
			break;

		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}

	switch (aeth >> IB_AETH_NAK_SHIFT) {
	case 0:		/* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
			if (wqe_to_tid_req(wqe)->ack_pending)
				rvt_mod_retry_timer_ext(qp,
							qpriv->timeout_shift);
			else
				rvt_stop_rc_timers(qp);
		} else if (qp->s_acked != qp->s_tail) {
			struct rvt_swqe *__w = NULL;

			if (qpriv->s_tid_cur != HFI1_QP_WQE_INVALID)
				__w = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);

			if (__w && __w->wr.opcode == IB_WR_TID_RDMA_WRITE &&
			    opcode == TID_OP(WRITE_RESP)) {
				/*
				 * Due to TID RDMA pipelining, the response
				 * may not be for the request at s_acked, so
				 * the PSN sequencing check has to be done
				 * separately here.
				 */
				if (cmp_psn(psn, qp->s_last_psn + 1)) {
					set_restart_qp(qp, rcd);
					goto bail_stop;
				}
				/*
				 * If the psn is being resent, stop the
				 * resending.
				 */
				if (qp->s_cur != qp->s_tail &&
				    cmp_psn(qp->s_psn, psn) <= 0)
					update_qp_retry_state(qp, psn,
							      __w->psn,
							      __w->lpsn);
				else if (--qpriv->pending_tid_w_resp)
					rvt_mod_retry_timer(qp);
				else
					rvt_stop_rc_timers(qp);
			} else {
				/* We expect more ACKs, so mod the timer. */
				rvt_mod_retry_timer(qp);
				/*
				 * We can stop re-sending the earlier packets
				 * and continue with the next packet the
				 * receiver wants.
				 */
				if (cmp_psn(qp->s_psn, psn) <= 0)
					reset_psn(qp, psn + 1);
			}
		} else {
			/* No more acks - kill all timers */
			rvt_stop_rc_timers(qp);
			if (cmp_psn(qp->s_psn, psn) <= 0) {
				qp->s_state = OP(SEND_LAST);
				qp->s_psn = psn + 1;
			}
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}
		rvt_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		/*
		 * If the current request is a TID RDMA WRITE request and the
		 * response is not a TID RDMA WRITE RESP packet, s_last_psn
		 * can't be advanced.
		 */
		if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
		    opcode != TID_OP(WRITE_RESP) &&
		    cmp_psn(psn, wqe->psn) >= 0)
			return 1;
		update_last_psn(qp, psn);
		return 1;

	case 1:		/* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail_stop;
		rdi = ib_to_rvt(qp->ibqp.device);
		if (!(rdi->post_parms[wqe->wr.opcode].flags &
		      RVT_OPERATION_IGN_RNR_CNT)) {
			if (qp->s_rnr_retry == 0) {
				status = IB_WC_RNR_RETRY_EXC_ERR;
				goto class_b;
			}
			if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
				qp->s_rnr_retry--;
		}

		/*
		 * The last valid PSN is the previous PSN. For a TID RDMA
		 * WRITE request, s_last_psn should only be incremented when
		 * a TID RDMA WRITE RESP is received, to avoid skipping lost
		 * TID RDMA WRITE RESP packets.
		 */
		if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
			reset_psn(qp, qp->s_last_psn + 1);
		} else {
			update_last_psn(qp, psn - 1);
			reset_psn(qp, psn);
		}

		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		rvt_stop_rc_timers(qp);
		rvt_add_rnr_timer(qp, aeth);
		return 0;

	case 3:		/* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
			IB_AETH_CREDIT_MASK) {
		case 0:	/* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN. Note that
			 * we might get a NAK in the middle of an RDMA READ
			 * response which terminates the RDMA READ.
			 */
			hfi1_restart_rc(qp, psn, 0);
			hfi1_schedule_send(qp);
			break;

		case 1:	/* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2:	/* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3:	/* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
					hfi1_kern_read_tid_flow_free(qp);

				hfi1_trdma_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail_stop;

	default:	/* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail_stop;
	}
	/* cannot be reached  */
bail_stop:
	rvt_stop_rc_timers(qp);
	return ret;
}
/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
			 struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from retry timer */
	rvt_stop_rc_timers(qp);

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	while (cmp_psn(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_TID_RDMA_WRITE ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}
/**
 * rc_rcv_resp - process an incoming RC response packet
 * @packet: data packet information
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC response
 * packet for the given QP. Called at interrupt level.
 */
static void rc_rcv_resp(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	void *data = packet->payload;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp;
	struct ib_other_headers *ohdr = packet->ohdr;
	struct rvt_swqe *wqe;
	/* ... */
	u32 psn = ib_bth_get_psn(packet->ohdr);
	u32 pmtu = qp->pmtu;
	u16 hdrsize = packet->hlen;
	u8 opcode = packet->opcode;
	u8 pad = packet->pad;
	u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
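	/*
	 * extra_bytes counts everything after the payload: the pad bytes,
	 * any extra trailing byte a 16B packet carries, and the 4-byte ICRC
	 * (SIZE_OF_CRC is in 32-bit words, hence the << 2).
	 */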
	spin_lock_irqsave(&qp->s_lock, flags);

	/* Ignore invalid responses. */
	if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = cmp_psn(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
				rvt_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
		else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		rvt_mod_retry_timer(qp);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		rvt_copy_sge(qp, &qp->s_rdma_read_sge,
			     data, pmtu, false, false);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/* ... */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* ... */
read_last:
		tlen -= hdrsize + extra_bytes;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		rvt_copy_sge(qp, &qp->s_rdma_read_sge,
			     data, tlen, false, false);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void)do_rc_ack(qp, aeth, psn,
				OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	ibp = rcd_to_iport(rcd);
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		rvt_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}
static inline void rc_cancel_ack(struct rvt_qp *qp)
{
	qp->r_adefered = 0;
	if (list_empty(&qp->rspwait))
		return;
	list_del_init(&qp->rspwait);
	qp->r_flags &= ~RVT_R_RSP_NAK;
	rvt_put_qp(qp);
}
/**
 * rc_rcv_error - process an incoming duplicate or error RC packet
 * ...
 *
 * This is called from hfi1_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP. Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
				 struct rvt_qp *qp, u32 opcode, u32 psn,
				 int diff, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct rvt_ack_entry *e;
	unsigned long flags;
	u8 prev;
	u8 mra; /* most recent ACK */
	bool old_req;

	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		}
		goto done;
	}

	/*
	 * Handle a duplicate request. Don't re-execute SEND, RDMA
	 * write or atomic op. Don't NAK errors, just silently drop
	 * the duplicate request. Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK. We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 */
	e = NULL;
	old_req = true;
	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);

	e = find_prev_entry(qp, psn, &prev, &mra, &old_req);

	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original request
		 * and start on pmtu boundaries. We reuse the old ack_queue
		 * slot since the requester should not back up and request
		 * an earlier PSN for the same request.
		 */
		offset = delta_psn(psn, e->psn) * qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		release_rdma_sge_mr(e);
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
			qp->s_acked_ack_queue = prev;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send engine is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8)opcode || old_req)
			goto unlock_done;
		if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
			qp->s_acked_ack_queue = prev;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (mra == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}

		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
			qp->s_acked_ack_queue = mra;
		qp->s_tail_ack_queue = mra;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	hfi1_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}
static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
			  u32 lqpn, u32 rqpn, u8 svc_type)
{
	struct opa_hfi1_cong_log_event_internal *cc_event;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	spin_lock_irqsave(&ppd->cc_log_lock, flags);

	ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
	ppd->threshold_event_counter++;

	cc_event = &ppd->cc_events[ppd->cc_log_idx++];
	if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
		ppd->cc_log_idx = 0;
	cc_event->lqpn = lqpn & RVT_QPN_MASK;
	cc_event->rqpn = rqpn & RVT_QPN_MASK;
	cc_event->sl = sl;
	cc_event->svc_type = svc_type;
	cc_event->rlid = rlid;
	cc_event->timestamp = ktime_get_ns() / 1024;
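	/*
	 * Dividing ktime_get_ns() by 1024 stores the timestamp above in
	 * units of 1.024 usec; each SL also gets one bit in
	 * threshold_cong_event_map, and cc_log_idx treats cc_events[] as
	 * a ring of OPA_CONG_LOG_ELEMS entries.
	 */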
	spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
}

static void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
			 u32 rqpn, u8 svc_type)
{
	struct cca_timer *cca_timer;
	u16 ccti, ccti_incr, ccti_timer, ccti_limit;
	u8 trigger_threshold;
	struct cc_state *cc_state;
	unsigned long flags;
	/* ... */

	/*
	 * 1) increase CCTI (for this SL)
	 * 2) select IPG (i.e., call set_link_ipg())
	 * 3) start timer
	 */
	ccti_limit = cc_state->cct.ccti_limit;
	ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
	trigger_threshold =
		cc_state->cong_setting.entries[sl].trigger_threshold;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	cca_timer = &ppd->cca_timer[sl];
	if (cca_timer->ccti < ccti_limit) {
		if (cca_timer->ccti + ccti_incr <= ccti_limit)
			cca_timer->ccti += ccti_incr;
		else
			cca_timer->ccti = ccti_limit;
		set_link_ipg(ppd);
	}

	ccti = cca_timer->ccti;

	if (!hrtimer_active(&cca_timer->hrtimer)) {
		/* ccti_timer is in units of 1.024 usec */
		unsigned long nsec = 1024 * ccti_timer;

		hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
			      HRTIMER_MODE_REL_PINNED);
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);

	if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
		log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
}
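/*
 * Net effect of process_becn(): a BECN bumps this SL's congestion control
 * table index (ccti) by ccti_increase, clamped at ccti_limit, picks the
 * matching inter-packet gap via set_link_ipg(), and arms the per-SL CCA
 * hrtimer (period ccti_timer, in 1.024 usec units) so the index can decay
 * again; crossing trigger_threshold logs a congestion event.
 */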
/**
 * hfi1_rc_rcv - process an incoming RC packet
 * @packet: data packet information
 *
 * This is called from qp_rcv() to process an incoming RC packet
 * for the given QP. May be called at interrupt level.
 */
void hfi1_rc_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	void *data = packet->payload;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct ib_other_headers *ohdr = packet->ohdr;
	u32 opcode = packet->opcode;
	u32 hdrsize = packet->hlen;
	u32 psn = ib_bth_get_psn(packet->ohdr);
	u32 pad = packet->pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	int diff;
	struct ib_reth *reth;
	unsigned long flags;
	int ret;
	bool copy_last = false, fecn;
	u32 rkey;
	u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);

	lockdep_assert_held(&qp->r_lock);

	if (hfi1_ruc_check_hdr(ibp, packet))
		return;

	fecn = process_ecn(qp, packet);
	opfn_trigger_conn_request(qp, be32_to_cpu(ohdr->bth[1]));

	/*
	 * Process responses (ACKs) before anything else. Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		rc_rcv_resp(packet);
		return;
	}

	/* Compute 24 bits worth of difference. */
	diff = delta_psn(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
			return;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	/* ... */
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
	case OP(SEND_ONLY_WITH_INVALIDATE):
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
			goto send_last_inv;
		/* FALLTHROUGH -- for SEND_ONLY_WITH_IMMEDIATE */
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST_WITH_INVALIDATE):
send_last_inv:
		rkey = be32_to_cpu(ohdr->u.ieth);
		if (rvt_invalidate_rkey(qp, rkey))
			goto no_immediate_data;
		wc.ex.invalidate_rkey = rkey;
		wc.wc_flags = IB_WC_WITH_INVALIDATE;
		goto send_last;
	case OP(RDMA_WRITE_LAST):
		copy_last = rvt_is_user_qp(qp);
		/* FALLTHROUGH */
	case OP(SEND_LAST):
no_immediate_data:
		wc.wc_flags = 0;
		wc.ex.imm_data = 0;
send_last:
		/* Check for invalid length; LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + extra_bytes)))
			goto nack_inv;
		/* Don't count the CRC (and padding and LT byte for 16B). */
		tlen -= (hdrsize + extra_bytes);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, copy_last);
		rvt_put_ss(&qp->r_sge);
		qp->r_msn++;
		if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
		/*
		 * IB mandates the presence of an SL in a work completion
		 * only for the UD transport (see section 11.4.2 of IBTA
		 * volume 1); the SL chosen below is consistent with the way
		 * IB/qib works, to avoid introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
		break;

	case OP(RDMA_WRITE_ONLY):
		copy_last = rvt_is_user_qp(qp);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		/* consume RWQE */
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto nack_op_err;
		if (!ret) {
			/* peer will send again */
			rvt_put_ss(&qp->r_sge);
			goto rnr_nak;
		}
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size rvt_size_atomic()+1 so use > not >= */
		if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_acked_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		release_rdma_sge_mr(e);
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN. We add 1 later
			 * below, so only add the remainder here.
			 */
			qp->r_psn += rvt_div_mtu(qp, len - 1);
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;
		qpriv->r_tid_alloc = qp->r_head_ack_queue;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		if (fecn)
			qp->s_flags |= RVT_S_ECN;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth = &ohdr->u.atomic_eth;
		u64 vaddr = get_ib_ateth_vaddr(ateth);
		bool opfn = opcode == OP(COMPARE_SWAP) &&
			vaddr == HFI1_VERBS_E_ATOMIC_VADDR;
		struct rvt_ack_entry *e;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
			     !opfn))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_acked_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		release_rdma_sge_mr(e);
		/* Process OPFN special virtual address */
		if (opfn) {
			opfn_conn_response(qp, e, ateth);
			goto ack;
		}
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
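		/*
		 * Both atomics must return the old value to the requester:
		 * atomic64_add_return() yields the post-add value, so sdata
		 * is subtracted back out below, while cmpxchg() already
		 * returns the prior contents.
		 */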
		sdata = get_ib_ateth_swap(ateth);
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     get_ib_ateth_compare(ateth),
				     sdata);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
ack:
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = psn;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;
		qpriv->r_tid_alloc = qp->r_head_ack_queue;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		if (fecn)
			qp->s_flags |= RVT_S_ECN;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
	if (psn & IB_BTH_REQ_ACK || fecn) {
		if (packet->numpkt == 0 || fecn ||
		    qp->r_adefered >= HFI1_PSN_CREDIT) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		qp->r_adefered++;
		rc_defered_ack(rcd, qp);
	}
	return;

rnr_nak:
	qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_op_err:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	hfi1_send_rc_ack(packet, fecn);
}
void hfi1_rc_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_packet *packet,
		    struct rvt_qp *qp)
{
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	int diff;
	u32 opcode;
	u32 psn;

	if (hfi1_ruc_check_hdr(ibp, packet))
		return;

	psn = ib_bth_get_psn(packet->ohdr);
	opcode = ib_bth_get_opcode(packet->ohdr);

	/* Only deal with RDMA Writes for now */
	if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
		diff = delta_psn(psn, qp->r_psn);
		if (!qp->r_nak_state && diff >= 0) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		}
	}
}