1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
3 * Copyright(c) 2018 - 2020 Intel Corporation.
18 * This is an end-to-end protocol at the hfi1 level between two nodes that
24 * -- The total data length should be greater than 256K;
25 * -- The total data length should be a multiple of 4K page size;
26 * -- Each local scatter-gather entry should be 4K page aligned;
27 * -- Each local scatter-gather entry should be a multiple of 4K page size;
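/*
 * Editor's example (hedged sketch, not part of the driver): a helper that
 * mirrors the four eligibility rules above, assuming a 4K PAGE_SIZE; the
 * name and signature are illustrative only.
 *
 *	static inline bool tid_rdma_sge_ok(u64 total_len, u64 addr, u64 len)
 *	{
 *		if (total_len <= SZ_256K || !IS_ALIGNED(total_len, PAGE_SIZE))
 *			return false;
 *		return IS_ALIGNED(addr, PAGE_SIZE) &&
 *		       IS_ALIGNED(len, PAGE_SIZE);
 *	}
 */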
56 #define HFI1_KERNEL_MAX_JKEY (2 * HFI1_ADMIN_JKEY_RANGE - 1)
97 * N - the context Number
98 * K - the Kdeth_qp
99 * M - Max_len
100 * T - Timeout
101 * D - reserveD
102 * V - version
103 * U - Urg capable
104 * J - Jkey
105 * R - max_Read
106 * W - max_Write
107 * C - Capcode
139 if (priv->r_tid_ack == HFI1_QP_WQE_INVALID) in validate_r_tid_ack()
140 priv->r_tid_ack = priv->r_tid_tail; in validate_r_tid_ack()
145 struct hfi1_qp_priv *priv = qp->priv; in tid_rdma_schedule_ack()
147 priv->s_flags |= RVT_S_ACK_PENDING; in tid_rdma_schedule_ack()
153 validate_r_tid_ack(qp->priv); in tid_rdma_trigger_ack()
160 (((u64)p->qp & TID_OPFN_QP_CTXT_MASK) << in tid_rdma_opfn_encode()
162 ((((u64)p->qp >> 16) & TID_OPFN_QP_KDETH_MASK) << in tid_rdma_opfn_encode()
164 (((u64)((p->max_len >> PAGE_SHIFT) - 1) & in tid_rdma_opfn_encode()
166 (((u64)p->timeout & TID_OPFN_TIMEOUT_MASK) << in tid_rdma_opfn_encode()
168 (((u64)p->urg & TID_OPFN_URG_MASK) << TID_OPFN_URG_SHIFT) | in tid_rdma_opfn_encode()
169 (((u64)p->jkey & TID_OPFN_JKEY_MASK) << TID_OPFN_JKEY_SHIFT) | in tid_rdma_opfn_encode()
170 (((u64)p->max_read & TID_OPFN_MAX_READ_MASK) << in tid_rdma_opfn_encode()
172 (((u64)p->max_write & TID_OPFN_MAX_WRITE_MASK) << in tid_rdma_opfn_encode()
178 p->max_len = (((data >> TID_OPFN_MAX_LEN_SHIFT) & in tid_rdma_opfn_decode()
180 p->jkey = (data >> TID_OPFN_JKEY_SHIFT) & TID_OPFN_JKEY_MASK; in tid_rdma_opfn_decode()
181 p->max_write = (data >> TID_OPFN_MAX_WRITE_SHIFT) & in tid_rdma_opfn_decode()
183 p->max_read = (data >> TID_OPFN_MAX_READ_SHIFT) & in tid_rdma_opfn_decode()
185 p->qp = in tid_rdma_opfn_decode()
189 p->urg = (data >> TID_OPFN_URG_SHIFT) & TID_OPFN_URG_MASK; in tid_rdma_opfn_decode()
190 p->timeout = (data >> TID_OPFN_TIMEOUT_SHIFT) & TID_OPFN_TIMEOUT_MASK; in tid_rdma_opfn_decode()
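/*
 * Editor's note (illustrative usage, not driver code): the encode/decode
 * pair above is symmetric, so a parameter set can be packed into the single
 * 64-bit OPFN payload and recovered, provided max_len is page aligned:
 *
 *	struct tid_rdma_params copy = {};
 *	u64 data = tid_rdma_opfn_encode(&priv->tid_rdma.local);
 *
 *	tid_rdma_opfn_decode(&copy, data);
 *	// copy.qp, copy.max_len, copy.timeout, copy.urg, copy.jkey,
 *	// copy.max_read and copy.max_write now match the local values.
 */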
195 struct hfi1_qp_priv *priv = qp->priv; in tid_rdma_opfn_init()
197 p->qp = (RVT_KDETH_QP_PREFIX << 16) | priv->rcd->ctxt; in tid_rdma_opfn_init()
198 p->max_len = TID_RDMA_MAX_SEGMENT_SIZE; in tid_rdma_opfn_init()
199 p->jkey = priv->rcd->jkey; in tid_rdma_opfn_init()
200 p->max_read = TID_RDMA_MAX_READ_SEGS_PER_REQ; in tid_rdma_opfn_init()
201 p->max_write = TID_RDMA_MAX_WRITE_SEGS_PER_REQ; in tid_rdma_opfn_init()
202 p->timeout = qp->timeout; in tid_rdma_opfn_init()
203 p->urg = is_urg_masked(priv->rcd); in tid_rdma_opfn_init()
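/*
 * Editor's note (worked example, prefix value assumed): with a KDETH QP
 * prefix of, say, 0x80 and receive context 3, the p->qp initialization a
 * few lines above gives p->qp = (0x80 << 16) | 3 = 0x800003; the low 16
 * bits carry the context Number (N) and the upper bits the Kdeth_qp (K)
 * of the OPFN field legend near the top of this file.
 */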
208 struct hfi1_qp_priv *priv = qp->priv; in tid_rdma_conn_req()
210 *data = tid_rdma_opfn_encode(&priv->tid_rdma.local); in tid_rdma_conn_req()
216 struct hfi1_qp_priv *priv = qp->priv; in tid_rdma_conn_reply()
220 old = rcu_dereference_protected(priv->tid_rdma.remote, in tid_rdma_conn_reply()
221 lockdep_is_held(&priv->opfn.lock)); in tid_rdma_conn_reply()
243 priv->tid_timer_timeout_jiffies = in tid_rdma_conn_reply()
244 usecs_to_jiffies((((4096UL * (1UL << remote->timeout)) / in tid_rdma_conn_reply()
246 trace_hfi1_opfn_param(qp, 0, &priv->tid_rdma.local); in tid_rdma_conn_reply()
248 rcu_assign_pointer(priv->tid_rdma.remote, remote); in tid_rdma_conn_reply()
251 * remote->max_len only when the request's data length is smaller in tid_rdma_conn_reply()
252 * than remote->max_len. In that case, there will be only one segment. in tid_rdma_conn_reply()
253 * Therefore, when priv->pkts_ps is used to calculate req->cur_seg in tid_rdma_conn_reply()
254 * during retry, it will lead to req->cur_seg = 0, which is exactly in tid_rdma_conn_reply()
257 priv->pkts_ps = (u16)rvt_div_mtu(qp, remote->max_len); in tid_rdma_conn_reply()
258 priv->timeout_shift = ilog2(priv->pkts_ps - 1) + 1; in tid_rdma_conn_reply()
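/*
 * Editor's note (worked example): with remote->max_len = 256K and a
 * 4096-byte MTU, pkts_ps = 262144 / 4096 = 64 packets per segment, and
 * timeout_shift = ilog2(64 - 1) + 1 = 6, the smallest shift for which
 * (1 << timeout_shift) covers a whole segment's worth of packets.
 */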
261 RCU_INIT_POINTER(priv->tid_rdma.remote, NULL); in tid_rdma_conn_reply()
262 priv->timeout_shift = 0; in tid_rdma_conn_reply()
287 struct hfi1_qp_priv *priv = qp->priv; in tid_rdma_conn_error()
290 old = rcu_dereference_protected(priv->tid_rdma.remote, in tid_rdma_conn_error()
291 lockdep_is_held(&priv->opfn.lock)); in tid_rdma_conn_error()
292 RCU_INIT_POINTER(priv->tid_rdma.remote, NULL); in tid_rdma_conn_error()
305 rcd->jkey = TID_RDMA_JKEY; in hfi1_kern_exp_rcv_init()
306 hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey); in hfi1_kern_exp_rcv_init()
311 * qp_to_rcd - determine the receive context used by a qp
331 if (qp->ibqp.qp_num == 0) in qp_to_rcd()
334 ctxt = hfi1_get_qp_map(dd, qp->ibqp.qp_num >> dd->qos_shift); in qp_to_rcd()
335 return dd->rcd[ctxt]; in qp_to_rcd()
341 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_qp_priv_init()
344 qpriv->rcd = qp_to_rcd(rdi, qp); in hfi1_qp_priv_init()
346 spin_lock_init(&qpriv->opfn.lock); in hfi1_qp_priv_init()
347 INIT_WORK(&qpriv->opfn.opfn_work, opfn_send_conn_request); in hfi1_qp_priv_init()
348 INIT_WORK(&qpriv->tid_rdma.trigger_work, tid_rdma_trigger_resume); in hfi1_qp_priv_init()
349 qpriv->flow_state.psn = 0; in hfi1_qp_priv_init()
350 qpriv->flow_state.index = RXE_NUM_TID_FLOWS; in hfi1_qp_priv_init()
351 qpriv->flow_state.last_index = RXE_NUM_TID_FLOWS; in hfi1_qp_priv_init()
352 qpriv->flow_state.generation = KERN_GENERATION_RESERVED; in hfi1_qp_priv_init()
353 qpriv->s_state = TID_OP(WRITE_RESP); in hfi1_qp_priv_init()
354 qpriv->s_tid_cur = HFI1_QP_WQE_INVALID; in hfi1_qp_priv_init()
355 qpriv->s_tid_head = HFI1_QP_WQE_INVALID; in hfi1_qp_priv_init()
356 qpriv->s_tid_tail = HFI1_QP_WQE_INVALID; in hfi1_qp_priv_init()
357 qpriv->rnr_nak_state = TID_RNR_NAK_INIT; in hfi1_qp_priv_init()
358 qpriv->r_tid_head = HFI1_QP_WQE_INVALID; in hfi1_qp_priv_init()
359 qpriv->r_tid_tail = HFI1_QP_WQE_INVALID; in hfi1_qp_priv_init()
360 qpriv->r_tid_ack = HFI1_QP_WQE_INVALID; in hfi1_qp_priv_init()
361 qpriv->r_tid_alloc = HFI1_QP_WQE_INVALID; in hfi1_qp_priv_init()
362 atomic_set(&qpriv->n_requests, 0); in hfi1_qp_priv_init()
363 atomic_set(&qpriv->n_tid_requests, 0); in hfi1_qp_priv_init()
364 timer_setup(&qpriv->s_tid_timer, hfi1_tid_timeout, 0); in hfi1_qp_priv_init()
365 timer_setup(&qpriv->s_tid_retry_timer, hfi1_tid_retry_timeout, 0); in hfi1_qp_priv_init()
366 INIT_LIST_HEAD(&qpriv->tid_wait); in hfi1_qp_priv_init()
368 if (init_attr->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) { in hfi1_qp_priv_init()
369 struct hfi1_devdata *dd = qpriv->rcd->dd; in hfi1_qp_priv_init()
371 qpriv->pages = kzalloc_node(TID_RDMA_MAX_PAGES * in hfi1_qp_priv_init()
372 sizeof(*qpriv->pages), in hfi1_qp_priv_init()
373 GFP_KERNEL, dd->node); in hfi1_qp_priv_init()
374 if (!qpriv->pages) in hfi1_qp_priv_init()
375 return -ENOMEM; in hfi1_qp_priv_init()
376 for (i = 0; i < qp->s_size; i++) { in hfi1_qp_priv_init()
381 dd->node); in hfi1_qp_priv_init()
383 return -ENOMEM; in hfi1_qp_priv_init()
385 hfi1_init_trdma_req(qp, &priv->tid_req); in hfi1_qp_priv_init()
386 priv->tid_req.e.swqe = wqe; in hfi1_qp_priv_init()
387 wqe->priv = priv; in hfi1_qp_priv_init()
393 dd->node); in hfi1_qp_priv_init()
395 return -ENOMEM; in hfi1_qp_priv_init()
397 hfi1_init_trdma_req(qp, &priv->tid_req); in hfi1_qp_priv_init()
398 priv->tid_req.e.ack = &qp->s_ack_queue[i]; in hfi1_qp_priv_init()
400 ret = hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req, in hfi1_qp_priv_init()
406 qp->s_ack_queue[i].priv = priv; in hfi1_qp_priv_init()
415 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_qp_priv_tid_free()
419 if (qp->ibqp.qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) { in hfi1_qp_priv_tid_free()
420 for (i = 0; i < qp->s_size; i++) { in hfi1_qp_priv_tid_free()
422 kfree(wqe->priv); in hfi1_qp_priv_tid_free()
423 wqe->priv = NULL; in hfi1_qp_priv_tid_free()
426 struct hfi1_ack_priv *priv = qp->s_ack_queue[i].priv; in hfi1_qp_priv_tid_free()
429 hfi1_kern_exp_rcv_free_flows(&priv->tid_req); in hfi1_qp_priv_tid_free()
431 qp->s_ack_queue[i].priv = NULL; in hfi1_qp_priv_tid_free()
433 cancel_work_sync(&qpriv->opfn.opfn_work); in hfi1_qp_priv_tid_free()
434 kfree(qpriv->pages); in hfi1_qp_priv_tid_free()
435 qpriv->pages = NULL; in hfi1_qp_priv_tid_free()
471 __must_hold(&rcd->exp_lock) in first_qp()
475 lockdep_assert_held(&rcd->exp_lock); in first_qp()
476 priv = list_first_entry_or_null(&queue->queue_head, in first_qp()
481 rvt_get_qp(priv->owner); in first_qp()
482 return priv->owner; in first_qp()
486 * kernel_tid_waiters - determine rcd wait
501 * HFI1_S_WAIT_TID_SPACE bit is set in qp->s_flags.
506 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) in kernel_tid_waiters()
511 lockdep_assert_held(&qp->s_lock); in kernel_tid_waiters()
512 lockdep_assert_held(&rcd->exp_lock); in kernel_tid_waiters()
514 if (!fqp || (fqp == qp && (qp->s_flags & HFI1_S_WAIT_TID_SPACE))) in kernel_tid_waiters()
521 * dequeue_tid_waiter - dequeue the qp from the list
539 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) in dequeue_tid_waiter()
541 struct hfi1_qp_priv *priv = qp->priv; in dequeue_tid_waiter()
543 lockdep_assert_held(&qp->s_lock); in dequeue_tid_waiter()
544 lockdep_assert_held(&rcd->exp_lock); in dequeue_tid_waiter()
545 if (list_empty(&priv->tid_wait)) in dequeue_tid_waiter()
547 list_del_init(&priv->tid_wait); in dequeue_tid_waiter()
548 qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE; in dequeue_tid_waiter()
549 queue->dequeue++; in dequeue_tid_waiter()
554 * queue_qp_for_tid_wait - suspend QP on tid space
566 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) in queue_qp_for_tid_wait()
568 struct hfi1_qp_priv *priv = qp->priv; in queue_qp_for_tid_wait()
570 lockdep_assert_held(&qp->s_lock); in queue_qp_for_tid_wait()
571 lockdep_assert_held(&rcd->exp_lock); in queue_qp_for_tid_wait()
572 if (list_empty(&priv->tid_wait)) { in queue_qp_for_tid_wait()
573 qp->s_flags |= HFI1_S_WAIT_TID_SPACE; in queue_qp_for_tid_wait()
574 list_add_tail(&priv->tid_wait, &queue->queue_head); in queue_qp_for_tid_wait()
575 priv->tid_enqueue = ++queue->enqueue; in queue_qp_for_tid_wait()
576 rcd->dd->verbs_dev.n_tidwait++; in queue_qp_for_tid_wait()
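/*
 * Editor's sketch of how the wait/wakeup helpers in this file fit together
 * (illustrative, not verbatim driver code; "resources_unavailable" is a
 * placeholder condition):
 *
 *	// allocation path, under rcd->exp_lock and qp->s_lock
 *	if (resources_unavailable) {
 *		queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp);
 *		return -EAGAIN;		// QP now waits on TID space
 *	}
 *
 *	// release path: hand the freed space to the longest waiter
 *	fqp = first_qp(rcd, &rcd->flow_queue);
 *	tid_rdma_schedule_tid_wakeup(fqp);
 *	// -> tid_rdma_trigger_resume() -> hfi1_do_send(fqp, true)
 */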
583 * __trigger_tid_waiter - trigger tid waiter
587 * assuming the caller is holding the qp->s_lock.
590 __must_hold(&qp->s_lock) in __trigger_tid_waiter()
592 lockdep_assert_held(&qp->s_lock); in __trigger_tid_waiter()
593 if (!(qp->s_flags & HFI1_S_WAIT_TID_SPACE)) in __trigger_tid_waiter()
600 * tid_rdma_schedule_tid_wakeup - schedule wakeup for a qp
622 priv = qp->priv; in tid_rdma_schedule_tid_wakeup()
623 ibp = to_iport(qp->ibqp.device, qp->port_num); in tid_rdma_schedule_tid_wakeup()
625 dd = dd_from_ibdev(qp->ibqp.device); in tid_rdma_schedule_tid_wakeup()
627 rval = queue_work_on(priv->s_sde ? in tid_rdma_schedule_tid_wakeup()
628 priv->s_sde->cpu : in tid_rdma_schedule_tid_wakeup()
629 cpumask_first(cpumask_of_node(dd->node)), in tid_rdma_schedule_tid_wakeup()
630 ppd->hfi1_wq, in tid_rdma_schedule_tid_wakeup()
631 &priv->tid_rdma.trigger_work); in tid_rdma_schedule_tid_wakeup()
637 * tid_rdma_trigger_resume - field a trigger work request
651 qp = priv->owner; in tid_rdma_trigger_resume()
652 spin_lock_irq(&qp->s_lock); in tid_rdma_trigger_resume()
653 if (qp->s_flags & HFI1_S_WAIT_TID_SPACE) { in tid_rdma_trigger_resume()
654 spin_unlock_irq(&qp->s_lock); in tid_rdma_trigger_resume()
655 hfi1_do_send(priv->owner, true); in tid_rdma_trigger_resume()
657 spin_unlock_irq(&qp->s_lock); in tid_rdma_trigger_resume()
663 * tid_rdma_flush_wait - unwind any tid space wait
670 __must_hold(&qp->s_lock) in _tid_rdma_flush_wait()
676 lockdep_assert_held(&qp->s_lock); in _tid_rdma_flush_wait()
677 priv = qp->priv; in _tid_rdma_flush_wait()
678 qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE; in _tid_rdma_flush_wait()
679 spin_lock(&priv->rcd->exp_lock); in _tid_rdma_flush_wait()
680 if (!list_empty(&priv->tid_wait)) { in _tid_rdma_flush_wait()
681 list_del_init(&priv->tid_wait); in _tid_rdma_flush_wait()
682 qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE; in _tid_rdma_flush_wait()
683 queue->dequeue++; in _tid_rdma_flush_wait()
686 spin_unlock(&priv->rcd->exp_lock); in _tid_rdma_flush_wait()
690 __must_hold(&qp->s_lock) in hfi1_tid_rdma_flush_wait()
692 struct hfi1_qp_priv *priv = qp->priv; in hfi1_tid_rdma_flush_wait()
694 _tid_rdma_flush_wait(qp, &priv->rcd->flow_queue); in hfi1_tid_rdma_flush_wait()
695 _tid_rdma_flush_wait(qp, &priv->rcd->rarr_queue); in hfi1_tid_rdma_flush_wait()
700 * kern_reserve_flow - allocate a hardware flow
713 * On success: a value between 0 and RXE_NUM_TID_FLOWS - 1
714 * On failure: -EAGAIN
717 __must_hold(&rcd->exp_lock) in kern_reserve_flow()
723 !test_and_set_bit(last, &rcd->flow_mask)) in kern_reserve_flow()
726 nr = ffz(rcd->flow_mask); in kern_reserve_flow()
728 (sizeof(rcd->flow_mask) * BITS_PER_BYTE)); in kern_reserve_flow()
729 if (nr > (RXE_NUM_TID_FLOWS - 1)) in kern_reserve_flow()
730 return -EAGAIN; in kern_reserve_flow()
731 set_bit(nr, &rcd->flow_mask); in kern_reserve_flow()
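/*
 * Editor's note (worked example): with flow_mask = 0b0011, a caller passing
 * last = 3 gets flow 3 back (reusing its previous index when still free),
 * while last = 1 (already in use) falls through to ffz() and is handed
 * flow 2, the lowest free bit; only when every flow bit is set does the
 * function return -EAGAIN.
 */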
750 write_uctxt_csr(rcd->dd, rcd->ctxt, in kern_set_hw_flow()
755 __must_hold(&rcd->exp_lock) in kern_setup_hw_flow()
757 u32 generation = rcd->flows[flow_idx].generation; in kern_setup_hw_flow()
773 __must_hold(&rcd->exp_lock) in kern_clear_hw_flow()
775 rcd->flows[flow_idx].generation = in kern_clear_hw_flow()
776 kern_flow_generation_next(rcd->flows[flow_idx].generation); in kern_clear_hw_flow()
782 struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv; in hfi1_kern_setup_hw_flow()
783 struct tid_flow_state *fs = &qpriv->flow_state; in hfi1_kern_setup_hw_flow()
789 if (fs->index != RXE_NUM_TID_FLOWS) in hfi1_kern_setup_hw_flow()
792 spin_lock_irqsave(&rcd->exp_lock, flags); in hfi1_kern_setup_hw_flow()
793 if (kernel_tid_waiters(rcd, &rcd->flow_queue, qp)) in hfi1_kern_setup_hw_flow()
796 ret = kern_reserve_flow(rcd, fs->last_index); in hfi1_kern_setup_hw_flow()
799 fs->index = ret; in hfi1_kern_setup_hw_flow()
800 fs->last_index = fs->index; in hfi1_kern_setup_hw_flow()
803 if (fs->generation != KERN_GENERATION_RESERVED) in hfi1_kern_setup_hw_flow()
804 rcd->flows[fs->index].generation = fs->generation; in hfi1_kern_setup_hw_flow()
805 fs->generation = kern_setup_hw_flow(rcd, fs->index); in hfi1_kern_setup_hw_flow()
806 fs->psn = 0; in hfi1_kern_setup_hw_flow()
807 dequeue_tid_waiter(rcd, &rcd->flow_queue, qp); in hfi1_kern_setup_hw_flow()
809 fqp = first_qp(rcd, &rcd->flow_queue); in hfi1_kern_setup_hw_flow()
810 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_setup_hw_flow()
815 queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp); in hfi1_kern_setup_hw_flow()
816 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_setup_hw_flow()
817 return -EAGAIN; in hfi1_kern_setup_hw_flow()
822 struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv; in hfi1_kern_clear_hw_flow()
823 struct tid_flow_state *fs = &qpriv->flow_state; in hfi1_kern_clear_hw_flow()
827 if (fs->index >= RXE_NUM_TID_FLOWS) in hfi1_kern_clear_hw_flow()
829 spin_lock_irqsave(&rcd->exp_lock, flags); in hfi1_kern_clear_hw_flow()
830 kern_clear_hw_flow(rcd, fs->index); in hfi1_kern_clear_hw_flow()
831 clear_bit(fs->index, &rcd->flow_mask); in hfi1_kern_clear_hw_flow()
832 fs->index = RXE_NUM_TID_FLOWS; in hfi1_kern_clear_hw_flow()
833 fs->psn = 0; in hfi1_kern_clear_hw_flow()
834 fs->generation = KERN_GENERATION_RESERVED; in hfi1_kern_clear_hw_flow()
837 fqp = first_qp(rcd, &rcd->flow_queue); in hfi1_kern_clear_hw_flow()
838 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_clear_hw_flow()
853 rcd->flows[i].generation = mask_generation(get_random_u32()); in hfi1_kern_init_ctxt_generations()
861 u8 count = s->count; in trdma_pset_order()
867 * tid_rdma_find_phys_blocks_4k - get groups based on MR info
898 trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr); in tid_rdma_find_phys_blocks_4k()
901 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0, in tid_rdma_find_phys_blocks_4k()
935 trace_hfi1_tid_pageset(flow->req->qp, setcount, in tid_rdma_find_phys_blocks_4k()
938 pagecount -= maxpages; in tid_rdma_find_phys_blocks_4k()
957 * tid_flush_pages - dump out pages into pagesets
989 pages -= maxpages; in tid_flush_pages()
998 * tid_rdma_find_phys_blocks_8k - get groups based on MR info
1008 * pages and i - 1 and i contiguous pages.
1034 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0); in tid_rdma_find_phys_blocks_8k()
1037 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1); in tid_rdma_find_phys_blocks_8k()
1055 /* i,i+1 consecutive, look at i-1,i */ in tid_rdma_find_phys_blocks_8k()
1063 /* save i-1 */ in tid_rdma_find_phys_blocks_8k()
1068 sets = tid_flush_pages(list, &idx, npages - idx, sets); in tid_rdma_find_phys_blocks_8k()
1079 * copy maintained in @ss->sge, the original sge is not modified.
1081 * Unlike IB RDMA WRITE, we can't decrement ss->num_sge here because we are not
1091 struct tid_rdma_request *req = flow->req; in kern_find_pages()
1092 struct rvt_sge *sge = &ss->sge; in kern_find_pages()
1093 u32 length = flow->req->seg_len; in kern_find_pages()
1097 while (length && req->isge < ss->num_sge) { in kern_find_pages()
1098 pages[i++] = virt_to_page(sge->vaddr); in kern_find_pages()
1100 sge->vaddr += len; in kern_find_pages()
1101 sge->length -= len; in kern_find_pages()
1102 sge->sge_length -= len; in kern_find_pages()
1103 if (!sge->sge_length) { in kern_find_pages()
1104 if (++req->isge < ss->num_sge) in kern_find_pages()
1105 *sge = ss->sg_list[req->isge - 1]; in kern_find_pages()
1106 } else if (sge->length == 0 && sge->mr->lkey) { in kern_find_pages()
1107 if (++sge->n >= RVT_SEGSZ) { in kern_find_pages()
1108 ++sge->m; in kern_find_pages()
1109 sge->n = 0; in kern_find_pages()
1111 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; in kern_find_pages()
1112 sge->length = sge->mr->map[sge->m]->segs[sge->n].length; in kern_find_pages()
1114 length -= len; in kern_find_pages()
1117 flow->length = flow->req->seg_len - length; in kern_find_pages()
1118 *last = req->isge != ss->num_sge; in kern_find_pages()
1128 dd = flow->req->rcd->dd; in dma_unmap_flow()
1129 for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets; in dma_unmap_flow()
1131 if (pset->count && pset->addr) { in dma_unmap_flow()
1132 dma_unmap_page(&dd->pcidev->dev, in dma_unmap_flow()
1133 pset->addr, in dma_unmap_flow()
1134 PAGE_SIZE * pset->count, in dma_unmap_flow()
1136 pset->mapped = 0; in dma_unmap_flow()
1144 struct hfi1_devdata *dd = flow->req->rcd->dd; in dma_map_flow()
1147 for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets; in dma_map_flow()
1149 if (pset->count) { in dma_map_flow()
1150 pset->addr = dma_map_page(&dd->pcidev->dev, in dma_map_flow()
1151 pages[pset->idx], in dma_map_flow()
1153 PAGE_SIZE * pset->count, in dma_map_flow()
1156 if (dma_mapping_error(&dd->pcidev->dev, pset->addr)) { in dma_map_flow()
1158 return -ENOMEM; in dma_map_flow()
1160 pset->mapped = 1; in dma_map_flow()
1168 return !!flow->pagesets[0].mapped; in dma_mapped()
1173 * segment. All segments are of length flow->req->seg_len.
1182 if (flow->npagesets) { in kern_get_phys_blocks()
1183 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, in kern_get_phys_blocks()
1192 if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096)) in kern_get_phys_blocks()
1193 flow->npagesets = in kern_get_phys_blocks()
1195 flow->pagesets); in kern_get_phys_blocks()
1197 flow->npagesets = in kern_get_phys_blocks()
1199 flow->pagesets); in kern_get_phys_blocks()
1208 struct kern_tid_node *node = &flow->tnode[flow->tnode_cnt++]; in kern_add_tid_node()
1210 WARN_ON_ONCE(flow->tnode_cnt >= in kern_add_tid_node()
1213 dd_dev_err(rcd->dd, in kern_add_tid_node()
1215 cnt, grp->map, grp->used); in kern_add_tid_node()
1217 node->grp = grp; in kern_add_tid_node()
1218 node->map = grp->map; in kern_add_tid_node()
1219 node->cnt = cnt; in kern_add_tid_node()
1220 trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1, in kern_add_tid_node()
1221 grp->base, grp->map, grp->used, cnt); in kern_add_tid_node()
1228 * modifying grp->map. This is done as follows, being cognizant of the lists
1231 * these groups will move from group->full without affecting used
1232 * 2. If more TID's are needed allocate from used (will move from used->full or
1235 * at a complete group (will move from group->used)
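/*
 * Editor's note (worked example, group size of 8 assumed): a flow needing
 * npagesets = 11 first claims one complete group of 8 from tid_group_list
 * (step 1), then tries to cover the remaining 3 entries from partially-used
 * groups on tid_used_list (step 2); only if those cannot supply 3 free
 * entries does it fall back to another complete group and use 3 of its 8
 * entries (step 3).
 */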
1239 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_alloc_tids()
1240 struct hfi1_devdata *dd = rcd->dd; in kern_alloc_tids()
1245 flow->tnode_cnt = 0; in kern_alloc_tids()
1246 ngroups = flow->npagesets / dd->rcv_entries.group_size; in kern_alloc_tids()
1251 list_for_each_entry(group, &rcd->tid_group_list.list, list) { in kern_alloc_tids()
1253 group->size); in kern_alloc_tids()
1255 pageidx += group->size; in kern_alloc_tids()
1256 if (!--ngroups) in kern_alloc_tids()
1260 if (pageidx >= flow->npagesets) in kern_alloc_tids()
1265 list_for_each_entry(used, &rcd->tid_used_list.list, list) { in kern_alloc_tids()
1266 use = min_t(u32, flow->npagesets - pageidx, in kern_alloc_tids()
1267 used->size - used->used); in kern_alloc_tids()
1271 if (pageidx >= flow->npagesets) in kern_alloc_tids()
1280 if (group && &group->list == &rcd->tid_group_list.list) in kern_alloc_tids()
1282 group = list_prepare_entry(group, &rcd->tid_group_list.list, in kern_alloc_tids()
1284 if (list_is_last(&group->list, &rcd->tid_group_list.list)) in kern_alloc_tids()
1287 use = min_t(u32, flow->npagesets - pageidx, group->size); in kern_alloc_tids()
1290 if (pageidx >= flow->npagesets) in kern_alloc_tids()
1293 trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ", in kern_alloc_tids()
1294 (u64)flow->npagesets); in kern_alloc_tids()
1295 return -EAGAIN; in kern_alloc_tids()
1303 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_program_rcv_group()
1304 struct hfi1_devdata *dd = rcd->dd; in kern_program_rcv_group()
1305 struct kern_tid_node *node = &flow->tnode[grp_num]; in kern_program_rcv_group()
1306 struct tid_group *grp = node->grp; in kern_program_rcv_group()
1308 u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT; in kern_program_rcv_group()
1312 for (i = 0; i < grp->size; i++) { in kern_program_rcv_group()
1313 rcventry = grp->base + i; in kern_program_rcv_group()
1315 if (node->map & BIT(i) || cnt >= node->cnt) { in kern_program_rcv_group()
1319 pset = &flow->pagesets[(*pset_idx)++]; in kern_program_rcv_group()
1320 if (pset->count) { in kern_program_rcv_group()
1322 pset->addr, trdma_pset_order(pset)); in kern_program_rcv_group()
1326 npages += pset->count; in kern_program_rcv_group()
1328 rcventry -= rcd->expected_base; in kern_program_rcv_group()
1337 pair = !(i & 0x1) && !((node->map >> i) & 0x3) && in kern_program_rcv_group()
1338 node->cnt >= cnt + 2; in kern_program_rcv_group()
1340 if (!pset->count) in kern_program_rcv_group()
1342 flow->tid_entry[flow->tidcnt++] = in kern_program_rcv_group()
1347 flow->req->qp, flow->tidcnt - 1, in kern_program_rcv_group()
1348 flow->tid_entry[flow->tidcnt - 1]); in kern_program_rcv_group()
1351 flow->npkts += (npages + pmtu_pg - 1) >> ilog2(pmtu_pg); in kern_program_rcv_group()
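/*
 * Editor's note (worked example): the shift above is a round-up divide by
 * pages-per-MTU; an 8192-byte MTU gives pmtu_pg = 2, so 9 pages cost
 * (9 + 2 - 1) >> 1 = 5 packets, while a 4096-byte MTU (pmtu_pg = 1) costs
 * exactly one packet per page.
 */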
1355 if (grp->used == grp->size - 1) in kern_program_rcv_group()
1356 tid_group_move(grp, &rcd->tid_used_list, in kern_program_rcv_group()
1357 &rcd->tid_full_list); in kern_program_rcv_group()
1358 else if (!grp->used) in kern_program_rcv_group()
1359 tid_group_move(grp, &rcd->tid_group_list, in kern_program_rcv_group()
1360 &rcd->tid_used_list); in kern_program_rcv_group()
1362 grp->used++; in kern_program_rcv_group()
1363 grp->map |= BIT(i); in kern_program_rcv_group()
1370 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_unprogram_rcv_group()
1371 struct hfi1_devdata *dd = rcd->dd; in kern_unprogram_rcv_group()
1372 struct kern_tid_node *node = &flow->tnode[grp_num]; in kern_unprogram_rcv_group()
1373 struct tid_group *grp = node->grp; in kern_unprogram_rcv_group()
1377 for (i = 0; i < grp->size; i++) { in kern_unprogram_rcv_group()
1378 rcventry = grp->base + i; in kern_unprogram_rcv_group()
1380 if (node->map & BIT(i) || cnt >= node->cnt) { in kern_unprogram_rcv_group()
1387 grp->used--; in kern_unprogram_rcv_group()
1388 grp->map &= ~BIT(i); in kern_unprogram_rcv_group()
1391 if (grp->used == grp->size - 1) in kern_unprogram_rcv_group()
1392 tid_group_move(grp, &rcd->tid_full_list, in kern_unprogram_rcv_group()
1393 &rcd->tid_used_list); in kern_unprogram_rcv_group()
1394 else if (!grp->used) in kern_unprogram_rcv_group()
1395 tid_group_move(grp, &rcd->tid_used_list, in kern_unprogram_rcv_group()
1396 &rcd->tid_group_list); in kern_unprogram_rcv_group()
1399 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_unprogram_rcv_group()
1400 struct hfi1_devdata *dd = rcd->dd; in kern_unprogram_rcv_group()
1403 cnt, grp->map, grp->used); in kern_unprogram_rcv_group()
1412 flow->npkts = 0; in kern_program_rcvarray()
1413 flow->tidcnt = 0; in kern_program_rcvarray()
1414 for (i = 0; i < flow->tnode_cnt; i++) in kern_program_rcvarray()
1416 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow); in kern_program_rcvarray()
1420 * hfi1_kern_exp_rcv_setup() - setup TID's and flow for one segment of a
1442 * req->flow_idx is the index of the flow which has been prepared in this
1443 * invocation of function call. With flow = &req->flows[req->flow_idx],
1444 * flow->tid_entry contains the TID array which the sender can use for TID RDMA
1445 * sends and flow->npkts contains number of packets required to send the
1449 * it signals error TID RDMA cannot be used for this sge and this function
1452 * For the queuing, caller must hold the flow->req->qp s_lock from the send
1456 * The function returns -EAGAIN if sufficient number of TID/flow resources to
1463 __must_hold(&req->qp->s_lock) in hfi1_kern_exp_rcv_setup()
1465 struct tid_rdma_flow *flow = &req->flows[req->setup_head]; in hfi1_kern_exp_rcv_setup()
1466 struct hfi1_ctxtdata *rcd = req->rcd; in hfi1_kern_exp_rcv_setup()
1467 struct hfi1_qp_priv *qpriv = req->qp->priv; in hfi1_kern_exp_rcv_setup()
1470 u16 clear_tail = req->clear_tail; in hfi1_kern_exp_rcv_setup()
1472 lockdep_assert_held(&req->qp->s_lock); in hfi1_kern_exp_rcv_setup()
1479 if (!CIRC_SPACE(req->setup_head, clear_tail, MAX_FLOWS) || in hfi1_kern_exp_rcv_setup()
1480 CIRC_CNT(req->setup_head, clear_tail, MAX_FLOWS) >= in hfi1_kern_exp_rcv_setup()
1481 req->n_flows) in hfi1_kern_exp_rcv_setup()
1482 return -EINVAL; in hfi1_kern_exp_rcv_setup()
1489 if (kern_get_phys_blocks(flow, qpriv->pages, ss, last)) { in hfi1_kern_exp_rcv_setup()
1490 hfi1_wait_kmem(flow->req->qp); in hfi1_kern_exp_rcv_setup()
1491 return -ENOMEM; in hfi1_kern_exp_rcv_setup()
1494 spin_lock_irqsave(&rcd->exp_lock, flags); in hfi1_kern_exp_rcv_setup()
1495 if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp)) in hfi1_kern_exp_rcv_setup()
1519 memset(&flow->flow_state, 0x0, sizeof(flow->flow_state)); in hfi1_kern_exp_rcv_setup()
1520 flow->idx = qpriv->flow_state.index; in hfi1_kern_exp_rcv_setup()
1521 flow->flow_state.generation = qpriv->flow_state.generation; in hfi1_kern_exp_rcv_setup()
1522 flow->flow_state.spsn = qpriv->flow_state.psn; in hfi1_kern_exp_rcv_setup()
1523 flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1; in hfi1_kern_exp_rcv_setup()
1524 flow->flow_state.r_next_psn = in hfi1_kern_exp_rcv_setup()
1525 full_flow_psn(flow, flow->flow_state.spsn); in hfi1_kern_exp_rcv_setup()
1526 qpriv->flow_state.psn += flow->npkts; in hfi1_kern_exp_rcv_setup()
1528 dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp); in hfi1_kern_exp_rcv_setup()
1530 fqp = first_qp(rcd, &rcd->rarr_queue); in hfi1_kern_exp_rcv_setup()
1531 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_exp_rcv_setup()
1534 req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1); in hfi1_kern_exp_rcv_setup()
1537 queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp); in hfi1_kern_exp_rcv_setup()
1538 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_exp_rcv_setup()
1539 return -EAGAIN; in hfi1_kern_exp_rcv_setup()
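/*
 * Editor's sketch of the per-request flow ring that the CIRC_* checks above
 * operate on (requester side; MAX_FLOWS slots, indices wrap via
 * "& (MAX_FLOWS - 1)"):
 *
 *	clear_tail ------> flow_idx ------> setup_head
 *	oldest segment     next segment     next free slot, filled by
 *	still holding      to go on the     hfi1_kern_exp_rcv_setup()
 *	TID resources      wire
 *
 * hfi1_kern_exp_rcv_setup() advances setup_head, the send side advances
 * flow_idx as segments are transmitted, and hfi1_kern_exp_rcv_clear()
 * advances clear_tail once a segment's TID resources can be released.
 */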
1544 flow->npagesets = 0; in hfi1_tid_rdma_reset_flow()
1554 __must_hold(&req->qp->s_lock) in hfi1_kern_exp_rcv_clear()
1556 struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; in hfi1_kern_exp_rcv_clear()
1557 struct hfi1_ctxtdata *rcd = req->rcd; in hfi1_kern_exp_rcv_clear()
1562 lockdep_assert_held(&req->qp->s_lock); in hfi1_kern_exp_rcv_clear()
1564 if (!CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) in hfi1_kern_exp_rcv_clear()
1565 return -EINVAL; in hfi1_kern_exp_rcv_clear()
1567 spin_lock_irqsave(&rcd->exp_lock, flags); in hfi1_kern_exp_rcv_clear()
1569 for (i = 0; i < flow->tnode_cnt; i++) in hfi1_kern_exp_rcv_clear()
1572 flow->tnode_cnt = 0; in hfi1_kern_exp_rcv_clear()
1574 fqp = first_qp(rcd, &rcd->rarr_queue); in hfi1_kern_exp_rcv_clear()
1575 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_exp_rcv_clear()
1580 req->clear_tail = (req->clear_tail + 1) & (MAX_FLOWS - 1); in hfi1_kern_exp_rcv_clear()
1582 if (fqp == req->qp) { in hfi1_kern_exp_rcv_clear()
1597 __must_hold(&req->qp->s_lock) in hfi1_kern_exp_rcv_clear_all()
1600 while (CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) { in hfi1_kern_exp_rcv_clear_all()
1607 * hfi1_kern_exp_rcv_free_flows - free previously allocated flow information
1612 kfree(req->flows); in hfi1_kern_exp_rcv_free_flows()
1613 req->flows = NULL; in hfi1_kern_exp_rcv_free_flows()
1617 * __trdma_clean_swqe - clean up for large sized QPs
1623 struct hfi1_swqe_priv *p = wqe->priv; in __trdma_clean_swqe()
1625 hfi1_kern_exp_rcv_free_flows(&p->tid_req); in __trdma_clean_swqe()
1637 if (likely(req->flows)) in hfi1_kern_exp_rcv_alloc_flows()
1640 req->rcd->numa_id); in hfi1_kern_exp_rcv_alloc_flows()
1642 return -ENOMEM; in hfi1_kern_exp_rcv_alloc_flows()
1650 req->flows = flows; in hfi1_kern_exp_rcv_alloc_flows()
1657 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_init_trdma_req()
1662 * can be pre-initialized here before the WRs have in hfi1_init_trdma_req()
1664 * However, non-NULL values for these variables do not in hfi1_init_trdma_req()
1669 req->qp = qp; in hfi1_init_trdma_req()
1670 req->rcd = qpriv->rcd; in hfi1_init_trdma_req()
1678 return dd->verbs_dev.n_tidwait; in hfi1_access_sw_tid_wait()
1687 head = req->setup_head; in find_flow_ib()
1688 tail = req->clear_tail; in find_flow_ib()
1691 flow = &req->flows[tail]; in find_flow_ib()
1692 if (cmp_psn(psn, flow->flow_state.ib_spsn) >= 0 && in find_flow_ib()
1693 cmp_psn(psn, flow->flow_state.ib_lpsn) <= 0) { in find_flow_ib()
1708 struct tid_rdma_flow *flow = &req->flows[req->flow_idx]; in hfi1_build_tid_rdma_read_packet()
1709 struct rvt_qp *qp = req->qp; in hfi1_build_tid_rdma_read_packet()
1710 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_build_tid_rdma_read_packet()
1711 struct hfi1_swqe_priv *wpriv = wqe->priv; in hfi1_build_tid_rdma_read_packet()
1712 struct tid_rdma_read_req *rreq = &ohdr->u.tid_rdma.r_req; in hfi1_build_tid_rdma_read_packet()
1718 *bth2 = mask_psn(flow->flow_state.ib_spsn + flow->pkt); in hfi1_build_tid_rdma_read_packet()
1719 trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow); in hfi1_build_tid_rdma_read_packet()
1722 req_addr = &flow->tid_entry[flow->tid_idx]; in hfi1_build_tid_rdma_read_packet()
1723 req_len = sizeof(*flow->tid_entry) * in hfi1_build_tid_rdma_read_packet()
1724 (flow->tidcnt - flow->tid_idx); in hfi1_build_tid_rdma_read_packet()
1726 memset(&ohdr->u.tid_rdma.r_req, 0, sizeof(ohdr->u.tid_rdma.r_req)); in hfi1_build_tid_rdma_read_packet()
1727 wpriv->ss.sge.vaddr = req_addr; in hfi1_build_tid_rdma_read_packet()
1728 wpriv->ss.sge.sge_length = req_len; in hfi1_build_tid_rdma_read_packet()
1729 wpriv->ss.sge.length = wpriv->ss.sge.sge_length; in hfi1_build_tid_rdma_read_packet()
1734 wpriv->ss.sge.mr = NULL; in hfi1_build_tid_rdma_read_packet()
1735 wpriv->ss.sge.m = 0; in hfi1_build_tid_rdma_read_packet()
1736 wpriv->ss.sge.n = 0; in hfi1_build_tid_rdma_read_packet()
1738 wpriv->ss.sg_list = NULL; in hfi1_build_tid_rdma_read_packet()
1739 wpriv->ss.total_len = wpriv->ss.sge.sge_length; in hfi1_build_tid_rdma_read_packet()
1740 wpriv->ss.num_sge = 1; in hfi1_build_tid_rdma_read_packet()
1744 remote = rcu_dereference(qpriv->tid_rdma.remote); in hfi1_build_tid_rdma_read_packet()
1746 KDETH_RESET(rreq->kdeth0, KVER, 0x1); in hfi1_build_tid_rdma_read_packet()
1747 KDETH_RESET(rreq->kdeth1, JKEY, remote->jkey); in hfi1_build_tid_rdma_read_packet()
1748 rreq->reth.vaddr = cpu_to_be64(wqe->rdma_wr.remote_addr + in hfi1_build_tid_rdma_read_packet()
1749 req->cur_seg * req->seg_len + flow->sent); in hfi1_build_tid_rdma_read_packet()
1750 rreq->reth.rkey = cpu_to_be32(wqe->rdma_wr.rkey); in hfi1_build_tid_rdma_read_packet()
1751 rreq->reth.length = cpu_to_be32(*len); in hfi1_build_tid_rdma_read_packet()
1752 rreq->tid_flow_psn = in hfi1_build_tid_rdma_read_packet()
1753 cpu_to_be32((flow->flow_state.generation << in hfi1_build_tid_rdma_read_packet()
1755 ((flow->flow_state.spsn + flow->pkt) & in hfi1_build_tid_rdma_read_packet()
1757 rreq->tid_flow_qp = in hfi1_build_tid_rdma_read_packet()
1758 cpu_to_be32(qpriv->tid_rdma.local.qp | in hfi1_build_tid_rdma_read_packet()
1759 ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) << in hfi1_build_tid_rdma_read_packet()
1761 qpriv->rcd->ctxt); in hfi1_build_tid_rdma_read_packet()
1762 rreq->verbs_qp = cpu_to_be32(qp->remote_qpn); in hfi1_build_tid_rdma_read_packet()
1764 *bth1 |= remote->qp; in hfi1_build_tid_rdma_read_packet()
1769 flow->sent += *len; in hfi1_build_tid_rdma_read_packet()
1770 req->cur_seg++; in hfi1_build_tid_rdma_read_packet()
1771 qp->s_state = TID_OP(READ_REQ); in hfi1_build_tid_rdma_read_packet()
1772 req->ack_pending++; in hfi1_build_tid_rdma_read_packet()
1773 req->flow_idx = (req->flow_idx + 1) & (MAX_FLOWS - 1); in hfi1_build_tid_rdma_read_packet()
1774 qpriv->pending_tid_r_segs++; in hfi1_build_tid_rdma_read_packet()
1775 qp->s_num_rd_atomic++; in hfi1_build_tid_rdma_read_packet()
1780 return sizeof(ohdr->u.tid_rdma.r_req) / sizeof(u32); in hfi1_build_tid_rdma_read_packet()
1790 __must_hold(&qp->s_lock) in hfi1_build_tid_rdma_read_req()
1792 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_build_tid_rdma_read_req()
1800 trace_hfi1_tid_req_build_read_req(qp, 0, wqe->wr.opcode, wqe->psn, in hfi1_build_tid_rdma_read_req()
1801 wqe->lpsn, req); in hfi1_build_tid_rdma_read_req()
1807 if (req->state == TID_REQUEST_SYNC) { in hfi1_build_tid_rdma_read_req()
1808 if (qpriv->pending_tid_r_segs) in hfi1_build_tid_rdma_read_req()
1811 hfi1_kern_clear_hw_flow(req->rcd, qp); in hfi1_build_tid_rdma_read_req()
1812 qpriv->s_flags &= ~HFI1_R_TID_SW_PSN; in hfi1_build_tid_rdma_read_req()
1813 req->state = TID_REQUEST_ACTIVE; in hfi1_build_tid_rdma_read_req()
1818 * have been allocated before. In this case, req->flow_idx should in hfi1_build_tid_rdma_read_req()
1819 * fall behind req->setup_head. in hfi1_build_tid_rdma_read_req()
1821 if (req->flow_idx == req->setup_head) { in hfi1_build_tid_rdma_read_req()
1823 if (req->state == TID_REQUEST_RESEND) { in hfi1_build_tid_rdma_read_req()
1826 * earlier segments have been re-sent. We need to in hfi1_build_tid_rdma_read_req()
1829 restart_sge(&qp->s_sge, wqe, req->s_next_psn, in hfi1_build_tid_rdma_read_req()
1830 qp->pmtu); in hfi1_build_tid_rdma_read_req()
1831 req->isge = 0; in hfi1_build_tid_rdma_read_req()
1832 req->state = TID_REQUEST_ACTIVE; in hfi1_build_tid_rdma_read_req()
1839 if ((qpriv->flow_state.psn + npkts) > MAX_TID_FLOW_PSN - 1) { in hfi1_build_tid_rdma_read_req()
1840 req->state = TID_REQUEST_SYNC; in hfi1_build_tid_rdma_read_req()
1845 if (hfi1_kern_setup_hw_flow(qpriv->rcd, qp)) in hfi1_build_tid_rdma_read_req()
1849 * The following call will advance req->setup_head after in hfi1_build_tid_rdma_read_req()
1852 if (hfi1_kern_exp_rcv_setup(req, &qp->s_sge, &last)) { in hfi1_build_tid_rdma_read_req()
1853 req->state = TID_REQUEST_QUEUED; in hfi1_build_tid_rdma_read_req()
1863 /* req->flow_idx should only be one slot behind req->setup_head */ in hfi1_build_tid_rdma_read_req()
1864 flow = &req->flows[req->flow_idx]; in hfi1_build_tid_rdma_read_req()
1865 flow->pkt = 0; in hfi1_build_tid_rdma_read_req()
1866 flow->tid_idx = 0; in hfi1_build_tid_rdma_read_req()
1867 flow->sent = 0; in hfi1_build_tid_rdma_read_req()
1870 flow->flow_state.ib_spsn = req->s_next_psn; in hfi1_build_tid_rdma_read_req()
1871 flow->flow_state.ib_lpsn = in hfi1_build_tid_rdma_read_req()
1872 flow->flow_state.ib_spsn + flow->npkts - 1; in hfi1_build_tid_rdma_read_req()
1876 req->s_next_psn += flow->npkts; in hfi1_build_tid_rdma_read_req()
1895 struct hfi1_qp_priv *qpriv = qp->priv; in tid_rdma_rcv_read_request()
1903 flow = &req->flows[req->setup_head]; in tid_rdma_rcv_read_request()
1905 /* payload length = packet length - (header length + ICRC length) */ in tid_rdma_rcv_read_request()
1906 pktlen = packet->tlen - (packet->hlen + 4); in tid_rdma_rcv_read_request()
1907 if (pktlen > sizeof(flow->tid_entry)) in tid_rdma_rcv_read_request()
1909 memcpy(flow->tid_entry, packet->ebuf, pktlen); in tid_rdma_rcv_read_request()
1910 flow->tidcnt = pktlen / sizeof(*flow->tid_entry); in tid_rdma_rcv_read_request()
1916 flow->npkts = rvt_div_round_up_mtu(qp, len); in tid_rdma_rcv_read_request()
1917 for (i = 0; i < flow->tidcnt; i++) { in tid_rdma_rcv_read_request()
1919 flow->tid_entry[i]); in tid_rdma_rcv_read_request()
1920 tlen = EXP_TID_GET(flow->tid_entry[i], LEN); in tid_rdma_rcv_read_request()
1936 req->clear_tail = req->setup_head; in tid_rdma_rcv_read_request()
1937 flow->pkt = 0; in tid_rdma_rcv_read_request()
1938 flow->tid_idx = 0; in tid_rdma_rcv_read_request()
1939 flow->tid_offset = 0; in tid_rdma_rcv_read_request()
1940 flow->sent = 0; in tid_rdma_rcv_read_request()
1941 flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_qp); in tid_rdma_rcv_read_request()
1942 flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) & in tid_rdma_rcv_read_request()
1944 flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_psn)); in tid_rdma_rcv_read_request()
1945 flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT; in tid_rdma_rcv_read_request()
1946 flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK; in tid_rdma_rcv_read_request()
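/*
 * Editor's note (worked example, assuming HFI1_KDETH_BTH_SEQ_SHIFT == 11):
 * a received tid_flow_psn of 0x1802 splits into generation =
 * 0x1802 >> 11 = 3 and spsn = 0x1802 & 0x7ff = 2, i.e. the flow starts at
 * sequence number 2 of hardware generation 3.
 */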
1947 flow->length = len; in tid_rdma_rcv_read_request()
1949 flow->flow_state.lpsn = flow->flow_state.spsn + in tid_rdma_rcv_read_request()
1950 flow->npkts - 1; in tid_rdma_rcv_read_request()
1951 flow->flow_state.ib_spsn = psn; in tid_rdma_rcv_read_request()
1952 flow->flow_state.ib_lpsn = flow->flow_state.ib_spsn + flow->npkts - 1; in tid_rdma_rcv_read_request()
1954 trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow); in tid_rdma_rcv_read_request()
1956 req->flow_idx = req->setup_head; in tid_rdma_rcv_read_request()
1959 req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1); in tid_rdma_rcv_read_request()
1964 e->opcode = (bth0 >> 24) & 0xff; in tid_rdma_rcv_read_request()
1965 e->psn = psn; in tid_rdma_rcv_read_request()
1966 e->lpsn = psn + flow->npkts - 1; in tid_rdma_rcv_read_request()
1967 e->sent = 0; in tid_rdma_rcv_read_request()
1969 req->n_flows = qpriv->tid_rdma.local.max_read; in tid_rdma_rcv_read_request()
1970 req->state = TID_REQUEST_ACTIVE; in tid_rdma_rcv_read_request()
1971 req->cur_seg = 0; in tid_rdma_rcv_read_request()
1972 req->comp_seg = 0; in tid_rdma_rcv_read_request()
1973 req->ack_seg = 0; in tid_rdma_rcv_read_request()
1974 req->isge = 0; in tid_rdma_rcv_read_request()
1975 req->seg_len = qpriv->tid_rdma.local.max_len; in tid_rdma_rcv_read_request()
1976 req->total_len = len; in tid_rdma_rcv_read_request()
1977 req->total_segs = 1; in tid_rdma_rcv_read_request()
1978 req->r_flow_psn = e->psn; in tid_rdma_rcv_read_request()
1980 trace_hfi1_tid_req_rcv_read_req(qp, 0, e->opcode, e->psn, e->lpsn, in tid_rdma_rcv_read_request()
1989 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); in tid_rdma_rcv_error()
1990 struct hfi1_ctxtdata *rcd = ((struct hfi1_qp_priv *)qp->priv)->rcd; in tid_rdma_rcv_error()
1991 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); in tid_rdma_rcv_error()
1992 struct hfi1_qp_priv *qpriv = qp->priv; in tid_rdma_rcv_error()
2003 if (!qp->r_nak_state) { in tid_rdma_rcv_error()
2004 ibp->rvp.n_rc_seqnak++; in tid_rdma_rcv_error()
2005 qp->r_nak_state = IB_NAK_PSN_ERROR; in tid_rdma_rcv_error()
2006 qp->r_ack_psn = qp->r_psn; in tid_rdma_rcv_error()
2012 ibp->rvp.n_rc_dupreq++; in tid_rdma_rcv_error()
2014 spin_lock_irqsave(&qp->s_lock, flags); in tid_rdma_rcv_error()
2016 if (!e || (e->opcode != TID_OP(READ_REQ) && in tid_rdma_rcv_error()
2017 e->opcode != TID_OP(WRITE_REQ))) in tid_rdma_rcv_error()
2021 req->r_flow_psn = psn; in tid_rdma_rcv_error()
2022 trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req); in tid_rdma_rcv_error()
2023 if (e->opcode == TID_OP(READ_REQ)) { in tid_rdma_rcv_error()
2031 reth = &ohdr->u.tid_rdma.r_req.reth; in tid_rdma_rcv_error()
2036 len = be32_to_cpu(reth->length); in tid_rdma_rcv_error()
2037 if (psn != e->psn || len != req->total_len) in tid_rdma_rcv_error()
2042 rkey = be32_to_cpu(reth->rkey); in tid_rdma_rcv_error()
2045 qp->r_len = len; in tid_rdma_rcv_error()
2046 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, in tid_rdma_rcv_error()
2055 * req->clear_tail is advanced). However, when an earlier in tid_rdma_rcv_error()
2057 * more (qp->s_tail_ack_queue is moved back, see below). in tid_rdma_rcv_error()
2061 bth0 = be32_to_cpu(ohdr->bth[0]); in tid_rdma_rcv_error()
2068 * qp->s_tail_ack_queue and qp->r_head_ack_queue); in tid_rdma_rcv_error()
2077 if (req->state == TID_REQUEST_RESEND) { in tid_rdma_rcv_error()
2078 req->state = TID_REQUEST_RESEND_ACTIVE; in tid_rdma_rcv_error()
2079 } else if (req->state == TID_REQUEST_INIT_RESEND) { in tid_rdma_rcv_error()
2080 req->state = TID_REQUEST_INIT; in tid_rdma_rcv_error()
2086 * qp->s_tail_ack_queue and qp->r_head_ack_queue). in tid_rdma_rcv_error()
2091 if (old_req || req->state == TID_REQUEST_INIT || in tid_rdma_rcv_error()
2092 (req->state == TID_REQUEST_SYNC && !req->cur_seg)) { in tid_rdma_rcv_error()
2094 if (i > rvt_size_atomic(&dev->rdi)) in tid_rdma_rcv_error()
2096 if (i == qp->r_head_ack_queue) in tid_rdma_rcv_error()
2098 e = &qp->s_ack_queue[i]; in tid_rdma_rcv_error()
2100 if (e->opcode == TID_OP(WRITE_REQ) && in tid_rdma_rcv_error()
2101 req->state == TID_REQUEST_INIT) in tid_rdma_rcv_error()
2102 req->state = TID_REQUEST_INIT_RESEND; in tid_rdma_rcv_error()
2118 if (req->clear_tail == req->setup_head) in tid_rdma_rcv_error()
2124 * re-sent. in tid_rdma_rcv_error()
2126 if (CIRC_CNT(req->flow_idx, req->clear_tail, MAX_FLOWS)) { in tid_rdma_rcv_error()
2127 fstate = &req->flows[req->clear_tail].flow_state; in tid_rdma_rcv_error()
2128 qpriv->pending_tid_w_segs -= in tid_rdma_rcv_error()
2129 CIRC_CNT(req->flow_idx, req->clear_tail, in tid_rdma_rcv_error()
2131 req->flow_idx = in tid_rdma_rcv_error()
2132 CIRC_ADD(req->clear_tail, in tid_rdma_rcv_error()
2133 delta_psn(psn, fstate->resp_ib_psn), in tid_rdma_rcv_error()
2135 qpriv->pending_tid_w_segs += in tid_rdma_rcv_error()
2136 delta_psn(psn, fstate->resp_ib_psn); in tid_rdma_rcv_error()
2144 if (CIRC_CNT(req->setup_head, req->flow_idx, in tid_rdma_rcv_error()
2146 req->cur_seg = delta_psn(psn, e->psn); in tid_rdma_rcv_error()
2147 req->state = TID_REQUEST_RESEND_ACTIVE; in tid_rdma_rcv_error()
2156 if (i > rvt_size_atomic(&dev->rdi)) in tid_rdma_rcv_error()
2158 if (i == qp->r_head_ack_queue) in tid_rdma_rcv_error()
2160 e = &qp->s_ack_queue[i]; in tid_rdma_rcv_error()
2162 trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, in tid_rdma_rcv_error()
2163 e->lpsn, req); in tid_rdma_rcv_error()
2164 if (e->opcode != TID_OP(WRITE_REQ) || in tid_rdma_rcv_error()
2165 req->cur_seg == req->comp_seg || in tid_rdma_rcv_error()
2166 req->state == TID_REQUEST_INIT || in tid_rdma_rcv_error()
2167 req->state == TID_REQUEST_INIT_RESEND) { in tid_rdma_rcv_error()
2168 if (req->state == TID_REQUEST_INIT) in tid_rdma_rcv_error()
2169 req->state = TID_REQUEST_INIT_RESEND; in tid_rdma_rcv_error()
2172 qpriv->pending_tid_w_segs -= in tid_rdma_rcv_error()
2173 CIRC_CNT(req->flow_idx, in tid_rdma_rcv_error()
2174 req->clear_tail, in tid_rdma_rcv_error()
2176 req->flow_idx = req->clear_tail; in tid_rdma_rcv_error()
2177 req->state = TID_REQUEST_RESEND; in tid_rdma_rcv_error()
2178 req->cur_seg = req->comp_seg; in tid_rdma_rcv_error()
2180 qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK; in tid_rdma_rcv_error()
2182 /* Re-process old requests. */ in tid_rdma_rcv_error()
2183 if (qp->s_acked_ack_queue == qp->s_tail_ack_queue) in tid_rdma_rcv_error()
2184 qp->s_acked_ack_queue = prev; in tid_rdma_rcv_error()
2185 qp->s_tail_ack_queue = prev; in tid_rdma_rcv_error()
2187 * Since the qp->s_tail_ack_queue is modified, the in tid_rdma_rcv_error()
2188 * qp->s_ack_state must be changed to re-initialize in tid_rdma_rcv_error()
2189 * qp->s_ack_rdma_sge; Otherwise, we will end up in in tid_rdma_rcv_error()
2192 qp->s_ack_state = OP(ACKNOWLEDGE); in tid_rdma_rcv_error()
2198 if (qpriv->rnr_nak_state) { in tid_rdma_rcv_error()
2199 qp->s_nak_state = 0; in tid_rdma_rcv_error()
2200 qpriv->rnr_nak_state = TID_RNR_NAK_INIT; in tid_rdma_rcv_error()
2201 qp->r_psn = e->lpsn + 1; in tid_rdma_rcv_error()
2205 qp->r_state = e->opcode; in tid_rdma_rcv_error()
2206 qp->r_nak_state = 0; in tid_rdma_rcv_error()
2207 qp->s_flags |= RVT_S_RESP_PENDING; in tid_rdma_rcv_error()
2210 spin_unlock_irqrestore(&qp->s_lock, flags); in tid_rdma_rcv_error()
2223 * - Setup struct tid_rdma_req with request info in hfi1_rc_rcv_tid_rdma_read_req()
2224 * - Initialize struct tid_rdma_flow info; in hfi1_rc_rcv_tid_rdma_read_req()
2225 * - Copy TID entries; in hfi1_rc_rcv_tid_rdma_read_req()
2226 * 3. Set the qp->s_ack_state. in hfi1_rc_rcv_tid_rdma_read_req()
2230 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_rc_rcv_tid_rdma_read_req()
2231 struct rvt_qp *qp = packet->qp; in hfi1_rc_rcv_tid_rdma_read_req()
2232 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); in hfi1_rc_rcv_tid_rdma_read_req()
2233 struct ib_other_headers *ohdr = packet->ohdr; in hfi1_rc_rcv_tid_rdma_read_req()
2237 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_rc_rcv_tid_rdma_read_req()
2245 bth0 = be32_to_cpu(ohdr->bth[0]); in hfi1_rc_rcv_tid_rdma_read_req()
2250 psn = mask_psn(be32_to_cpu(ohdr->bth[2])); in hfi1_rc_rcv_tid_rdma_read_req()
2253 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) in hfi1_rc_rcv_tid_rdma_read_req()
2256 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) in hfi1_rc_rcv_tid_rdma_read_req()
2259 reth = &ohdr->u.tid_rdma.r_req.reth; in hfi1_rc_rcv_tid_rdma_read_req()
2260 vaddr = be64_to_cpu(reth->vaddr); in hfi1_rc_rcv_tid_rdma_read_req()
2261 len = be32_to_cpu(reth->length); in hfi1_rc_rcv_tid_rdma_read_req()
2263 if (!len || len & ~PAGE_MASK || len > qpriv->tid_rdma.local.max_len) in hfi1_rc_rcv_tid_rdma_read_req()
2266 diff = delta_psn(psn, qp->r_psn); in hfi1_rc_rcv_tid_rdma_read_req()
2273 next = qp->r_head_ack_queue + 1; in hfi1_rc_rcv_tid_rdma_read_req()
2274 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) in hfi1_rc_rcv_tid_rdma_read_req()
2276 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_read_req()
2277 if (unlikely(next == qp->s_tail_ack_queue)) { in hfi1_rc_rcv_tid_rdma_read_req()
2278 if (!qp->s_ack_queue[next].sent) { in hfi1_rc_rcv_tid_rdma_read_req()
2284 e = &qp->s_ack_queue[qp->r_head_ack_queue]; in hfi1_rc_rcv_tid_rdma_read_req()
2287 rkey = be32_to_cpu(reth->rkey); in hfi1_rc_rcv_tid_rdma_read_req()
2288 qp->r_len = len; in hfi1_rc_rcv_tid_rdma_read_req()
2290 if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr, in hfi1_rc_rcv_tid_rdma_read_req()
2299 qp->r_state = e->opcode; in hfi1_rc_rcv_tid_rdma_read_req()
2300 qp->r_nak_state = 0; in hfi1_rc_rcv_tid_rdma_read_req()
2306 qp->r_msn++; in hfi1_rc_rcv_tid_rdma_read_req()
2307 qp->r_psn += e->lpsn - e->psn + 1; in hfi1_rc_rcv_tid_rdma_read_req()
2309 qp->r_head_ack_queue = next; in hfi1_rc_rcv_tid_rdma_read_req()
2313 * queue, qpriv->r_tid_alloc follows qp->r_head_ack_queue. It is ok to in hfi1_rc_rcv_tid_rdma_read_req()
2317 qpriv->r_tid_alloc = qp->r_head_ack_queue; in hfi1_rc_rcv_tid_rdma_read_req()
2320 qp->s_flags |= RVT_S_RESP_PENDING; in hfi1_rc_rcv_tid_rdma_read_req()
2322 qp->s_flags |= RVT_S_ECN; in hfi1_rc_rcv_tid_rdma_read_req()
2325 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_read_req()
2329 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_read_req()
2332 qp->r_nak_state = nack_state; in hfi1_rc_rcv_tid_rdma_read_req()
2333 qp->r_ack_psn = qp->r_psn; in hfi1_rc_rcv_tid_rdma_read_req()
2338 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_read_req()
2340 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; in hfi1_rc_rcv_tid_rdma_read_req()
2341 qp->r_ack_psn = qp->r_psn; in hfi1_rc_rcv_tid_rdma_read_req()
2348 struct hfi1_ack_priv *epriv = e->priv; in hfi1_build_tid_rdma_read_resp()
2349 struct tid_rdma_request *req = &epriv->tid_req; in hfi1_build_tid_rdma_read_resp()
2350 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_build_tid_rdma_read_resp()
2351 struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; in hfi1_build_tid_rdma_read_resp()
2352 u32 tidentry = flow->tid_entry[flow->tid_idx]; in hfi1_build_tid_rdma_read_resp()
2354 struct tid_rdma_read_resp *resp = &ohdr->u.tid_rdma.r_rsp; in hfi1_build_tid_rdma_read_resp()
2360 *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset); in hfi1_build_tid_rdma_read_resp()
2361 flow->sent += *len; in hfi1_build_tid_rdma_read_resp()
2362 next_offset = flow->tid_offset + *len; in hfi1_build_tid_rdma_read_resp()
2363 last_pkt = (flow->sent >= flow->length); in hfi1_build_tid_rdma_read_resp()
2365 trace_hfi1_tid_entry_build_read_resp(qp, flow->tid_idx, tidentry); in hfi1_build_tid_rdma_read_resp()
2366 trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow); in hfi1_build_tid_rdma_read_resp()
2369 remote = rcu_dereference(qpriv->tid_rdma.remote); in hfi1_build_tid_rdma_read_resp()
2374 KDETH_RESET(resp->kdeth0, KVER, 0x1); in hfi1_build_tid_rdma_read_resp()
2375 KDETH_SET(resp->kdeth0, SH, !last_pkt); in hfi1_build_tid_rdma_read_resp()
2376 KDETH_SET(resp->kdeth0, INTR, !!(!last_pkt && remote->urg)); in hfi1_build_tid_rdma_read_resp()
2377 KDETH_SET(resp->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL)); in hfi1_build_tid_rdma_read_resp()
2378 KDETH_SET(resp->kdeth0, TID, EXP_TID_GET(tidentry, IDX)); in hfi1_build_tid_rdma_read_resp()
2379 KDETH_SET(resp->kdeth0, OM, om == KDETH_OM_LARGE); in hfi1_build_tid_rdma_read_resp()
2380 KDETH_SET(resp->kdeth0, OFFSET, flow->tid_offset / om); in hfi1_build_tid_rdma_read_resp()
2381 KDETH_RESET(resp->kdeth1, JKEY, remote->jkey); in hfi1_build_tid_rdma_read_resp()
2382 resp->verbs_qp = cpu_to_be32(qp->remote_qpn); in hfi1_build_tid_rdma_read_resp()
2385 resp->aeth = rvt_compute_aeth(qp); in hfi1_build_tid_rdma_read_resp()
2386 resp->verbs_psn = cpu_to_be32(mask_psn(flow->flow_state.ib_spsn + in hfi1_build_tid_rdma_read_resp()
2387 flow->pkt)); in hfi1_build_tid_rdma_read_resp()
2390 *bth1 = flow->tid_qpn; in hfi1_build_tid_rdma_read_resp()
2391 *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) & in hfi1_build_tid_rdma_read_resp()
2393 (flow->flow_state.generation << in hfi1_build_tid_rdma_read_resp()
2398 req->clear_tail = (req->clear_tail + 1) & in hfi1_build_tid_rdma_read_resp()
2399 (MAX_FLOWS - 1); in hfi1_build_tid_rdma_read_resp()
2402 flow->tid_offset = 0; in hfi1_build_tid_rdma_read_resp()
2403 flow->tid_idx++; in hfi1_build_tid_rdma_read_resp()
2405 flow->tid_offset = next_offset; in hfi1_build_tid_rdma_read_resp()
2408 hdwords = sizeof(ohdr->u.tid_rdma.r_rsp) / sizeof(u32); in hfi1_build_tid_rdma_read_resp()
2416 __must_hold(&qp->s_lock) in find_tid_request()
2422 end = qp->s_cur + 1; in find_tid_request()
2423 if (end == qp->s_size) in find_tid_request()
2425 for (i = qp->s_acked; i != end;) { in find_tid_request()
2427 if (cmp_psn(psn, wqe->psn) >= 0 && in find_tid_request()
2428 cmp_psn(psn, wqe->lpsn) <= 0) { in find_tid_request()
2429 if (wqe->wr.opcode == opcode) in find_tid_request()
2433 if (++i == qp->s_size) in find_tid_request()
2451 struct ib_other_headers *ohdr = packet->ohdr; in hfi1_rc_rcv_tid_rdma_read_resp()
2452 struct rvt_qp *qp = packet->qp; in hfi1_rc_rcv_tid_rdma_read_resp()
2453 struct hfi1_qp_priv *priv = qp->priv; in hfi1_rc_rcv_tid_rdma_read_resp()
2454 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_rc_rcv_tid_rdma_read_resp()
2464 kpsn = mask_psn(be32_to_cpu(ohdr->bth[2])); in hfi1_rc_rcv_tid_rdma_read_resp()
2465 aeth = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.aeth); in hfi1_rc_rcv_tid_rdma_read_resp()
2466 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; in hfi1_rc_rcv_tid_rdma_read_resp()
2468 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_read_resp()
2469 ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn)); in hfi1_rc_rcv_tid_rdma_read_resp()
2474 flow = &req->flows[req->clear_tail]; in hfi1_rc_rcv_tid_rdma_read_resp()
2476 if (cmp_psn(ipsn, flow->flow_state.ib_lpsn)) { in hfi1_rc_rcv_tid_rdma_read_resp()
2479 if (cmp_psn(kpsn, flow->flow_state.r_next_psn)) in hfi1_rc_rcv_tid_rdma_read_resp()
2481 flow->flow_state.r_next_psn = mask_psn(kpsn + 1); in hfi1_rc_rcv_tid_rdma_read_resp()
2489 if (fecn && packet->etype == RHF_RCV_TYPE_EAGER) { in hfi1_rc_rcv_tid_rdma_read_resp()
2492 u32 tlen = packet->tlen; in hfi1_rc_rcv_tid_rdma_read_resp()
2493 u16 hdrsize = packet->hlen; in hfi1_rc_rcv_tid_rdma_read_resp()
2494 u8 pad = packet->pad; in hfi1_rc_rcv_tid_rdma_read_resp()
2495 u8 extra_bytes = pad + packet->extra_byte + in hfi1_rc_rcv_tid_rdma_read_resp()
2497 u32 pmtu = qp->pmtu; in hfi1_rc_rcv_tid_rdma_read_resp()
2501 len = restart_sge(&ss, req->e.swqe, ipsn, pmtu); in hfi1_rc_rcv_tid_rdma_read_resp()
2504 rvt_copy_sge(qp, &ss, packet->payload, pmtu, false, in hfi1_rc_rcv_tid_rdma_read_resp()
2507 priv->s_flags |= HFI1_R_TID_SW_PSN; in hfi1_rc_rcv_tid_rdma_read_resp()
2512 flow->flow_state.r_next_psn = mask_psn(kpsn + 1); in hfi1_rc_rcv_tid_rdma_read_resp()
2513 req->ack_pending--; in hfi1_rc_rcv_tid_rdma_read_resp()
2514 priv->pending_tid_r_segs--; in hfi1_rc_rcv_tid_rdma_read_resp()
2515 qp->s_num_rd_atomic--; in hfi1_rc_rcv_tid_rdma_read_resp()
2516 if ((qp->s_flags & RVT_S_WAIT_FENCE) && in hfi1_rc_rcv_tid_rdma_read_resp()
2517 !qp->s_num_rd_atomic) { in hfi1_rc_rcv_tid_rdma_read_resp()
2518 qp->s_flags &= ~(RVT_S_WAIT_FENCE | in hfi1_rc_rcv_tid_rdma_read_resp()
2522 if (qp->s_flags & RVT_S_WAIT_RDMAR) { in hfi1_rc_rcv_tid_rdma_read_resp()
2523 qp->s_flags &= ~(RVT_S_WAIT_RDMAR | RVT_S_WAIT_ACK); in hfi1_rc_rcv_tid_rdma_read_resp()
2528 trace_hfi1_tid_req_rcv_read_resp(qp, 0, req->e.swqe->wr.opcode, in hfi1_rc_rcv_tid_rdma_read_resp()
2529 req->e.swqe->psn, req->e.swqe->lpsn, in hfi1_rc_rcv_tid_rdma_read_resp()
2531 trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow); in hfi1_rc_rcv_tid_rdma_read_resp()
2540 if (++req->comp_seg >= req->total_segs) { in hfi1_rc_rcv_tid_rdma_read_resp()
2541 priv->tid_r_comp++; in hfi1_rc_rcv_tid_rdma_read_resp()
2542 req->state = TID_REQUEST_COMPLETE; in hfi1_rc_rcv_tid_rdma_read_resp()
2550 if ((req->state == TID_REQUEST_SYNC && in hfi1_rc_rcv_tid_rdma_read_resp()
2551 req->comp_seg == req->cur_seg) || in hfi1_rc_rcv_tid_rdma_read_resp()
2552 priv->tid_r_comp == priv->tid_r_reqs) { in hfi1_rc_rcv_tid_rdma_read_resp()
2553 hfi1_kern_clear_hw_flow(priv->rcd, qp); in hfi1_rc_rcv_tid_rdma_read_resp()
2554 priv->s_flags &= ~HFI1_R_TID_SW_PSN; in hfi1_rc_rcv_tid_rdma_read_resp()
2555 if (req->state == TID_REQUEST_SYNC) in hfi1_rc_rcv_tid_rdma_read_resp()
2556 req->state = TID_REQUEST_ACTIVE; in hfi1_rc_rcv_tid_rdma_read_resp()
2566 * state. However, if the wqe queue is empty (qp->s_acked == qp->s_tail in hfi1_rc_rcv_tid_rdma_read_resp()
2567 * == qp->s_head), it would be unsafe to complete the wqe pointed by in hfi1_rc_rcv_tid_rdma_read_resp()
2568 * qp->s_acked here. Putting the qp into error state will safely flush in hfi1_rc_rcv_tid_rdma_read_resp()
2571 if (qp->s_last == qp->s_acked) in hfi1_rc_rcv_tid_rdma_read_resp()
2575 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_read_resp()
2579 __must_hold(&qp->s_lock) in hfi1_kern_read_tid_flow_free()
2581 u32 n = qp->s_acked; in hfi1_kern_read_tid_flow_free()
2584 struct hfi1_qp_priv *priv = qp->priv; in hfi1_kern_read_tid_flow_free()
2586 lockdep_assert_held(&qp->s_lock); in hfi1_kern_read_tid_flow_free()
2588 while (n != qp->s_tail) { in hfi1_kern_read_tid_flow_free()
2590 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { in hfi1_kern_read_tid_flow_free()
2595 if (++n == qp->s_size) in hfi1_kern_read_tid_flow_free()
2599 hfi1_kern_clear_hw_flow(priv->rcd, qp); in hfi1_kern_read_tid_flow_free()
2604 struct rvt_qp *qp = packet->qp; in tid_rdma_tid_err()
2609 spin_lock(&qp->s_lock); in tid_rdma_tid_err()
2615 * response packets. In this case, we have to re-transmit the in tid_rdma_tid_err()
2619 hfi1_restart_rc(qp, qp->s_last_psn + 1, 1); in tid_rdma_tid_err()
2624 spin_unlock(&qp->s_lock); in tid_rdma_tid_err()
2636 qp->r_flags |= RVT_R_RDMAR_SEQ; in restart_tid_rdma_read_req()
2638 flow = &req->flows[req->clear_tail]; in restart_tid_rdma_read_req()
2639 hfi1_restart_rc(qp, flow->flow_state.ib_spsn, 0); in restart_tid_rdma_read_req()
2640 if (list_empty(&qp->rspwait)) { in restart_tid_rdma_read_req()
2641 qp->r_flags |= RVT_R_RSP_SEND; in restart_tid_rdma_read_req()
2643 list_add_tail(&qp->rspwait, &rcd->qp_wait_list); in restart_tid_rdma_read_req()
2653 * The caller must hold the packet->qp->r_lock and the rcu_read_lock.
2658 __must_hold(&packet->qp->r_lock) __must_hold(RCU) in handle_read_kdeth_eflags()
2660 struct hfi1_pportdata *ppd = rcd->ppd; in handle_read_kdeth_eflags()
2661 struct hfi1_devdata *dd = ppd->dd; in handle_read_kdeth_eflags()
2667 struct rvt_qp *qp = packet->qp; in handle_read_kdeth_eflags()
2668 struct hfi1_qp_priv *priv = qp->priv; in handle_read_kdeth_eflags()
2673 lockdep_assert_held(&qp->r_lock); in handle_read_kdeth_eflags()
2677 spin_lock(&qp->s_lock); in handle_read_kdeth_eflags()
2679 if (cmp_psn(ibpsn, qp->s_last_psn) < 0 || in handle_read_kdeth_eflags()
2680 cmp_psn(ibpsn, qp->s_psn) > 0) in handle_read_kdeth_eflags()
2688 ack_psn = ibpsn - 1; in handle_read_kdeth_eflags()
2689 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in handle_read_kdeth_eflags()
2690 ibp = to_iport(qp->ibqp.device, qp->port_num); in handle_read_kdeth_eflags()
2693 while ((int)delta_psn(ack_psn, wqe->lpsn) >= 0) { in handle_read_kdeth_eflags()
2699 if (wqe->wr.opcode == IB_WR_RDMA_READ || in handle_read_kdeth_eflags()
2700 wqe->wr.opcode == IB_WR_TID_RDMA_READ || in handle_read_kdeth_eflags()
2701 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || in handle_read_kdeth_eflags()
2702 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { in handle_read_kdeth_eflags()
2704 if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) { in handle_read_kdeth_eflags()
2705 qp->r_flags |= RVT_R_RDMAR_SEQ; in handle_read_kdeth_eflags()
2706 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { in handle_read_kdeth_eflags()
2710 hfi1_restart_rc(qp, qp->s_last_psn + 1, in handle_read_kdeth_eflags()
2712 if (list_empty(&qp->rspwait)) { in handle_read_kdeth_eflags()
2713 qp->r_flags |= RVT_R_RSP_SEND; in handle_read_kdeth_eflags()
2716 &qp->rspwait, in handle_read_kdeth_eflags()
2717 &rcd->qp_wait_list); in handle_read_kdeth_eflags()
2729 if (qp->s_acked == qp->s_tail) in handle_read_kdeth_eflags()
2733 if (qp->s_acked == qp->s_tail) in handle_read_kdeth_eflags()
2737 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) in handle_read_kdeth_eflags()
2741 trace_hfi1_tid_req_read_kdeth_eflags(qp, 0, wqe->wr.opcode, wqe->psn, in handle_read_kdeth_eflags()
2742 wqe->lpsn, req); in handle_read_kdeth_eflags()
2756 flow = &req->flows[req->clear_tail]; in handle_read_kdeth_eflags()
2758 req->clear_tail, in handle_read_kdeth_eflags()
2760 if (priv->s_flags & HFI1_R_TID_SW_PSN) { in handle_read_kdeth_eflags()
2762 flow->flow_state.r_next_psn); in handle_read_kdeth_eflags()
2772 if (qp->r_flags & RVT_R_RDMAR_SEQ) in handle_read_kdeth_eflags()
2773 qp->r_flags &= in handle_read_kdeth_eflags()
2786 flow->flow_state.lpsn); in handle_read_kdeth_eflags()
2789 if (qp->r_flags & RVT_R_RDMAR_SEQ) in handle_read_kdeth_eflags()
2790 qp->r_flags &= in handle_read_kdeth_eflags()
2793 flow->flow_state.r_next_psn = in handle_read_kdeth_eflags()
2798 last_psn = read_r_next_psn(dd, rcd->ctxt, in handle_read_kdeth_eflags()
2799 flow->idx); in handle_read_kdeth_eflags()
2800 flow->flow_state.r_next_psn = last_psn; in handle_read_kdeth_eflags()
2801 priv->s_flags |= HFI1_R_TID_SW_PSN; in handle_read_kdeth_eflags()
2806 if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) in handle_read_kdeth_eflags()
2841 spin_unlock(&qp->s_lock); in handle_read_kdeth_eflags()
2849 struct hfi1_ibport *ibp = &ppd->ibport_data; in hfi1_handle_kdeth_eflags()
2850 struct hfi1_devdata *dd = ppd->dd; in hfi1_handle_kdeth_eflags()
2851 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; in hfi1_handle_kdeth_eflags()
2852 u8 rcv_type = rhf_rcv_type(packet->rhf); in hfi1_handle_kdeth_eflags()
2853 u8 rte = rhf_rcv_type_err(packet->rhf); in hfi1_handle_kdeth_eflags()
2854 struct ib_header *hdr = packet->hdr; in hfi1_handle_kdeth_eflags()
2856 int lnh = be16_to_cpu(hdr->lrh[0]) & 3; in hfi1_handle_kdeth_eflags()
2857 u16 lid = be16_to_cpu(hdr->lrh[1]); in hfi1_handle_kdeth_eflags()
2870 packet->rhf); in hfi1_handle_kdeth_eflags()
2871 if (packet->rhf & RHF_ICRC_ERR) in hfi1_handle_kdeth_eflags()
2874 packet->ohdr = &hdr->u.oth; in hfi1_handle_kdeth_eflags()
2875 ohdr = packet->ohdr; in hfi1_handle_kdeth_eflags()
2876 trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf))); in hfi1_handle_kdeth_eflags()
2879 qp_num = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_qp) & in hfi1_handle_kdeth_eflags()
2884 psn = mask_psn(be32_to_cpu(ohdr->bth[2])); in hfi1_handle_kdeth_eflags()
2885 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; in hfi1_handle_kdeth_eflags()
2888 qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); in hfi1_handle_kdeth_eflags()
2892 packet->qp = qp; in hfi1_handle_kdeth_eflags()
2895 spin_lock_irqsave(&qp->r_lock, flags); in hfi1_handle_kdeth_eflags()
2896 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { in hfi1_handle_kdeth_eflags()
2897 ibp->rvp.n_pkt_drops++; in hfi1_handle_kdeth_eflags()
2901 if (packet->rhf & RHF_TID_ERR) { in hfi1_handle_kdeth_eflags()
2903 u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */ in hfi1_handle_kdeth_eflags()
2922 ibpsn = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn); in hfi1_handle_kdeth_eflags()
2930 * qp->s_tail_ack_queue points to the rvt_ack_entry currently being in hfi1_handle_kdeth_eflags()
2934 spin_lock(&qp->s_lock); in hfi1_handle_kdeth_eflags()
2935 qpriv = qp->priv; in hfi1_handle_kdeth_eflags()
2936 if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID || in hfi1_handle_kdeth_eflags()
2937 qpriv->r_tid_tail == qpriv->r_tid_head) in hfi1_handle_kdeth_eflags()
2939 e = &qp->s_ack_queue[qpriv->r_tid_tail]; in hfi1_handle_kdeth_eflags()
2940 if (e->opcode != TID_OP(WRITE_REQ)) in hfi1_handle_kdeth_eflags()
2943 if (req->comp_seg == req->cur_seg) in hfi1_handle_kdeth_eflags()
2945 flow = &req->flows[req->clear_tail]; in hfi1_handle_kdeth_eflags()
2949 trace_hfi1_tid_req_handle_kdeth_eflags(qp, 0, e->opcode, e->psn, in hfi1_handle_kdeth_eflags()
2950 e->lpsn, req); in hfi1_handle_kdeth_eflags()
2951 trace_hfi1_tid_flow_handle_kdeth_eflags(qp, req->clear_tail, flow); in hfi1_handle_kdeth_eflags()
2957 if (!(qpriv->s_flags & HFI1_R_TID_SW_PSN)) { in hfi1_handle_kdeth_eflags()
2958 qpriv->s_flags |= HFI1_R_TID_SW_PSN; in hfi1_handle_kdeth_eflags()
2959 flow->flow_state.r_next_psn = in hfi1_handle_kdeth_eflags()
2960 read_r_next_psn(dd, rcd->ctxt, in hfi1_handle_kdeth_eflags()
2961 flow->idx); in hfi1_handle_kdeth_eflags()
2962 qpriv->r_next_psn_kdeth = in hfi1_handle_kdeth_eflags()
2963 flow->flow_state.r_next_psn; in hfi1_handle_kdeth_eflags()
2975 flow->flow_state.r_next_psn); in hfi1_handle_kdeth_eflags()
2981 qpriv->s_nak_state = 0; in hfi1_handle_kdeth_eflags()
2988 flow->flow_state.lpsn)) in hfi1_handle_kdeth_eflags()
2990 flow->flow_state.r_next_psn = in hfi1_handle_kdeth_eflags()
2992 qpriv->r_next_psn_kdeth = in hfi1_handle_kdeth_eflags()
2993 flow->flow_state.r_next_psn; in hfi1_handle_kdeth_eflags()
3022 spin_unlock(&qp->s_lock); in hfi1_handle_kdeth_eflags()
3024 spin_unlock_irqrestore(&qp->r_lock, flags); in hfi1_handle_kdeth_eflags()
3030 ibp->rvp.n_rc_seqnak++; in hfi1_handle_kdeth_eflags()
3031 if (!qpriv->s_nak_state) { in hfi1_handle_kdeth_eflags()
3032 qpriv->s_nak_state = IB_NAK_PSN_ERROR; in hfi1_handle_kdeth_eflags()
3034 qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn); in hfi1_handle_kdeth_eflags()
3051 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_tid_rdma_restart_req()
3056 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { in hfi1_tid_rdma_restart_req()
3057 *bth2 = mask_psn(qp->s_psn); in hfi1_tid_rdma_restart_req()
3063 trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, in hfi1_tid_rdma_restart_req()
3064 wqe->psn, wqe->lpsn, in hfi1_tid_rdma_restart_req()
3069 fidx = req->acked_tail; in hfi1_tid_rdma_restart_req()
3070 flow = &req->flows[fidx]; in hfi1_tid_rdma_restart_req()
3071 *bth2 = mask_psn(req->r_ack_psn); in hfi1_tid_rdma_restart_req()
3074 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) in hfi1_tid_rdma_restart_req()
3075 delta_pkts = delta_psn(*bth2, flow->flow_state.ib_spsn); in hfi1_tid_rdma_restart_req()
3079 flow->flow_state.spsn)); in hfi1_tid_rdma_restart_req()
3082 diff = delta_pkts + flow->resync_npkts; in hfi1_tid_rdma_restart_req()
3084 flow->sent = 0; in hfi1_tid_rdma_restart_req()
3085 flow->pkt = 0; in hfi1_tid_rdma_restart_req()
3086 flow->tid_idx = 0; in hfi1_tid_rdma_restart_req()
3087 flow->tid_offset = 0; in hfi1_tid_rdma_restart_req()
3089 for (tididx = 0; tididx < flow->tidcnt; tididx++) { in hfi1_tid_rdma_restart_req()
3090 u32 tidentry = flow->tid_entry[tididx], tidlen, in hfi1_tid_rdma_restart_req()
3093 flow->tid_offset = 0; in hfi1_tid_rdma_restart_req()
3097 flow->pkt += npkts; in hfi1_tid_rdma_restart_req()
3098 flow->sent += (npkts == tidnpkts ? tidlen : in hfi1_tid_rdma_restart_req()
3099 npkts * qp->pmtu); in hfi1_tid_rdma_restart_req()
3100 flow->tid_offset += npkts * qp->pmtu; in hfi1_tid_rdma_restart_req()
3101 diff -= npkts; in hfi1_tid_rdma_restart_req()
3106 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) { in hfi1_tid_rdma_restart_req()
3107 rvt_skip_sge(&qpriv->tid_ss, (req->cur_seg * req->seg_len) + in hfi1_tid_rdma_restart_req()
3108 flow->sent, 0); in hfi1_tid_rdma_restart_req()
3110 * Packet PSN is based on flow_state.spsn + flow->pkt. However, in hfi1_tid_rdma_restart_req()
3114 * adjust flow->pkt in order to calculate the correct PSN. in hfi1_tid_rdma_restart_req()
3116 flow->pkt -= flow->resync_npkts; in hfi1_tid_rdma_restart_req()
3119 if (flow->tid_offset == in hfi1_tid_rdma_restart_req()
3120 EXP_TID_GET(flow->tid_entry[tididx], LEN) * PAGE_SIZE) { in hfi1_tid_rdma_restart_req()
3122 flow->tid_offset = 0; in hfi1_tid_rdma_restart_req()
3124 flow->tid_idx = tididx; in hfi1_tid_rdma_restart_req()
3125 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) in hfi1_tid_rdma_restart_req()
3127 req->flow_idx = fidx; in hfi1_tid_rdma_restart_req()
3129 req->clear_tail = fidx; in hfi1_tid_rdma_restart_req()
3132 trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, wqe->psn, in hfi1_tid_rdma_restart_req()
3133 wqe->lpsn, req); in hfi1_tid_rdma_restart_req()
3134 req->state = TID_REQUEST_ACTIVE; in hfi1_tid_rdma_restart_req()
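/*
 * Illustrative sketch (standalone, not driver code): the restart walk above
 * converts "diff packets already delivered" back into a resume position by
 * consuming TID entries, each covering its length-in-pages worth of bytes
 * and therefore a fixed number of PMTU-sized packets.  A standalone version
 * of that walk, assuming 4 KiB pages and taking the per-entry page counts
 * directly; all names are local to the example.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE 4096u

struct resume_pos {
	uint32_t tid_idx;	/* TID entry to resume in	*/
	uint32_t tid_offset;	/* byte offset into that entry	*/
	uint32_t sent;		/* bytes accounted for so far	*/
	uint32_t pkt;		/* packets accounted for so far	*/
};

static struct resume_pos find_resume(const uint32_t *len_pages, uint32_t nent,
				     uint32_t pmtu, uint32_t diff)
{
	struct resume_pos pos = { 0 };

	for (pos.tid_idx = 0; pos.tid_idx < nent; pos.tid_idx++) {
		uint32_t tidlen = len_pages[pos.tid_idx] * EX_PAGE_SIZE;
		uint32_t tidnpkts = (tidlen + pmtu - 1) / pmtu;
		uint32_t npkts = diff < tidnpkts ? diff : tidnpkts;

		pos.tid_offset = 0;
		pos.pkt += npkts;
		pos.sent += (npkts == tidnpkts) ? tidlen : npkts * pmtu;
		pos.tid_offset += npkts * pmtu;
		diff -= npkts;
		if (!diff)
			break;
	}
	return pos;
}

int main(void)
{
	/* two 8-page entries, 4 KiB PMTU, 10 packets already on the wire */
	uint32_t pages[] = { 8, 8 };
	struct resume_pos p = find_resume(pages, 2, 4096, 10);

	printf("idx=%u offset=%u sent=%u\n", p.tid_idx, p.tid_offset, p.sent);
	return 0;
}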
3135 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) { in hfi1_tid_rdma_restart_req()
3138 i = qpriv->s_tid_tail; in hfi1_tid_rdma_restart_req()
3140 for (; CIRC_CNT(req->setup_head, fidx, MAX_FLOWS); in hfi1_tid_rdma_restart_req()
3142 req->flows[fidx].sent = 0; in hfi1_tid_rdma_restart_req()
3143 req->flows[fidx].pkt = 0; in hfi1_tid_rdma_restart_req()
3144 req->flows[fidx].tid_idx = 0; in hfi1_tid_rdma_restart_req()
3145 req->flows[fidx].tid_offset = 0; in hfi1_tid_rdma_restart_req()
3146 req->flows[fidx].resync_npkts = 0; in hfi1_tid_rdma_restart_req()
3148 if (i == qpriv->s_tid_cur) in hfi1_tid_rdma_restart_req()
3151 i = (++i == qp->s_size ? 0 : i); in hfi1_tid_rdma_restart_req()
3153 } while (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE); in hfi1_tid_rdma_restart_req()
3155 req->cur_seg = req->ack_seg; in hfi1_tid_rdma_restart_req()
3156 fidx = req->acked_tail; in hfi1_tid_rdma_restart_req()
3157 /* Pull req->clear_tail back */ in hfi1_tid_rdma_restart_req()
3158 req->clear_tail = fidx; in hfi1_tid_rdma_restart_req()
3166 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_qp_kern_exp_rcv_clear_all()
3169 if (qp->ibqp.qp_type != IB_QPT_RC || !HFI1_CAP_IS_KSET(TID_RDMA)) in hfi1_qp_kern_exp_rcv_clear_all()
3176 fs = &qpriv->flow_state; in hfi1_qp_kern_exp_rcv_clear_all()
3177 if (fs->index != RXE_NUM_TID_FLOWS) in hfi1_qp_kern_exp_rcv_clear_all()
3178 hfi1_kern_clear_hw_flow(qpriv->rcd, qp); in hfi1_qp_kern_exp_rcv_clear_all()
3180 for (i = qp->s_acked; i != qp->s_head;) { in hfi1_qp_kern_exp_rcv_clear_all()
3183 if (++i == qp->s_size) in hfi1_qp_kern_exp_rcv_clear_all()
3186 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) in hfi1_qp_kern_exp_rcv_clear_all()
3189 struct hfi1_swqe_priv *priv = wqe->priv; in hfi1_qp_kern_exp_rcv_clear_all()
3191 ret = hfi1_kern_exp_rcv_clear(&priv->tid_req); in hfi1_qp_kern_exp_rcv_clear_all()
3194 for (i = qp->s_acked_ack_queue; i != qp->r_head_ack_queue;) { in hfi1_qp_kern_exp_rcv_clear_all()
3195 struct rvt_ack_entry *e = &qp->s_ack_queue[i]; in hfi1_qp_kern_exp_rcv_clear_all()
3197 if (++i == rvt_max_atomic(ib_to_rvt(qp->ibqp.device))) in hfi1_qp_kern_exp_rcv_clear_all()
3200 if (e->opcode != TID_OP(WRITE_REQ)) in hfi1_qp_kern_exp_rcv_clear_all()
3203 struct hfi1_ack_priv *priv = e->priv; in hfi1_qp_kern_exp_rcv_clear_all()
3205 ret = hfi1_kern_exp_rcv_clear(&priv->tid_req); in hfi1_qp_kern_exp_rcv_clear_all()
3213 struct hfi1_qp_priv *priv = qp->priv; in hfi1_tid_rdma_wqe_interlock()
3217 s_prev = (qp->s_cur == 0 ? qp->s_size : qp->s_cur) - 1; in hfi1_tid_rdma_wqe_interlock()
3220 switch (wqe->wr.opcode) { in hfi1_tid_rdma_wqe_interlock()
3228 switch (prev->wr.opcode) { in hfi1_tid_rdma_wqe_interlock()
3231 if (req->ack_seg != req->total_segs) in hfi1_tid_rdma_wqe_interlock()
3239 if (prev->wr.opcode != IB_WR_TID_RDMA_WRITE) in hfi1_tid_rdma_wqe_interlock()
3243 switch (prev->wr.opcode) { in hfi1_tid_rdma_wqe_interlock()
3245 if (qp->s_acked != qp->s_cur) in hfi1_tid_rdma_wqe_interlock()
3250 if (req->ack_seg != req->total_segs) in hfi1_tid_rdma_wqe_interlock()
3263 priv->s_flags |= HFI1_S_TID_WAIT_INTERLCK; in hfi1_tid_rdma_wqe_interlock()
3275 if ((u64)sge->vaddr & ~PAGE_MASK || in hfi1_check_sge_align()
3276 sge->sge_length & ~PAGE_MASK) in hfi1_check_sge_align()
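/*
 * Illustrative sketch (standalone, not driver code): the check above rejects
 * any local SGE whose virtual address or length is not page aligned, since
 * TID entries map whole pages.  A minimal equivalent, assuming 4 KiB pages.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_PAGE_SIZE 4096ull
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

bool sge_ok_for_tid_rdma(uint64_t vaddr, uint64_t length)
{
	/* both the address and the length must be multiples of the page size */
	return !(vaddr & ~EX_PAGE_MASK) && !(length & ~EX_PAGE_MASK);
}
/* e.g. sge_ok_for_tid_rdma(0x200000, 262144) -> true,
 *      sge_ok_for_tid_rdma(0x200100, 262144) -> false (unaligned address) */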
3284 struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv; in setup_tid_rdma_wqe()
3285 struct hfi1_swqe_priv *priv = wqe->priv; in setup_tid_rdma_wqe()
3289 struct hfi1_pportdata *ppd = qpriv->rcd->ppd; in setup_tid_rdma_wqe()
3291 if ((rdma_ah_get_dlid(&qp->remote_ah_attr) & ~((1 << ppd->lmc) - 1)) == in setup_tid_rdma_wqe()
3292 ppd->lid) in setup_tid_rdma_wqe()
3294 if (qpriv->hdr_type != HFI1_PKT_TYPE_9B) in setup_tid_rdma_wqe()
3298 remote = rcu_dereference(qpriv->tid_rdma.remote); in setup_tid_rdma_wqe()
3306 if (wqe->wr.opcode == IB_WR_RDMA_READ) { in setup_tid_rdma_wqe()
3307 if (hfi1_check_sge_align(qp, &wqe->sg_list[0], in setup_tid_rdma_wqe()
3308 wqe->wr.num_sge)) { in setup_tid_rdma_wqe()
3312 } else if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { in setup_tid_rdma_wqe()
3315 * 1. The remote address is page-aligned, in setup_tid_rdma_wqe()
3317 * 3. The length is a multiple of the page size. in setup_tid_rdma_wqe()

3319 if (!(wqe->rdma_wr.remote_addr & ~PAGE_MASK) && in setup_tid_rdma_wqe()
3320 !(wqe->length & ~PAGE_MASK)) { in setup_tid_rdma_wqe()
3327 if (hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req, GFP_ATOMIC)) in setup_tid_rdma_wqe()
3329 wqe->wr.opcode = new_opcode; in setup_tid_rdma_wqe()
3330 priv->tid_req.seg_len = in setup_tid_rdma_wqe()
3331 min_t(u32, remote->max_len, wqe->length); in setup_tid_rdma_wqe()
3332 priv->tid_req.total_segs = in setup_tid_rdma_wqe()
3333 DIV_ROUND_UP(wqe->length, priv->tid_req.seg_len); in setup_tid_rdma_wqe()
3335 wqe->lpsn = wqe->psn; in setup_tid_rdma_wqe()
3336 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { in setup_tid_rdma_wqe()
3337 priv->tid_req.n_flows = remote->max_read; in setup_tid_rdma_wqe()
3338 qpriv->tid_r_reqs++; in setup_tid_rdma_wqe()
3339 wqe->lpsn += rvt_div_round_up_mtu(qp, wqe->length) - 1; in setup_tid_rdma_wqe()
3341 wqe->lpsn += priv->tid_req.total_segs - 1; in setup_tid_rdma_wqe()
3342 atomic_inc(&qpriv->n_requests); in setup_tid_rdma_wqe()
3345 priv->tid_req.cur_seg = 0; in setup_tid_rdma_wqe()
3346 priv->tid_req.comp_seg = 0; in setup_tid_rdma_wqe()
3347 priv->tid_req.ack_seg = 0; in setup_tid_rdma_wqe()
3348 priv->tid_req.state = TID_REQUEST_INACTIVE; in setup_tid_rdma_wqe()
3355 priv->tid_req.acked_tail = priv->tid_req.setup_head; in setup_tid_rdma_wqe()
3356 trace_hfi1_tid_req_setup_tid_wqe(qp, 1, wqe->wr.opcode, in setup_tid_rdma_wqe()
3357 wqe->psn, wqe->lpsn, in setup_tid_rdma_wqe()
3358 &priv->tid_req); in setup_tid_rdma_wqe()
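/*
 * Illustrative sketch (standalone, not driver code): the setup path above
 * sizes segments from the peer's advertised max_len and derives how many
 * PSNs the request spans -- one PSN per packet for a TID RDMA READ, one PSN
 * per segment for a TID RDMA WRITE.  A standalone rendering of that
 * arithmetic; the numbers below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_EX(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t length = 1024 * 1024;		/* 1 MiB request	*/
	uint32_t remote_max_len = 256 * 1024;	/* peer's segment limit	*/
	uint32_t pmtu = 4096;

	uint32_t seg_len = length < remote_max_len ? length : remote_max_len;
	uint32_t total_segs = DIV_ROUND_UP_EX(length, seg_len);

	/* READ: lpsn - psn + 1 == number of data packets	*/
	uint32_t read_psn_span = DIV_ROUND_UP_EX(length, pmtu);
	/* WRITE: lpsn - psn + 1 == number of segments		*/
	uint32_t write_psn_span = total_segs;

	printf("segs=%u read span=%u write span=%u\n",
	       total_segs, read_psn_span, write_psn_span);
	return 0;
}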
3370 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_build_tid_rdma_write_req()
3375 remote = rcu_dereference(qpriv->tid_rdma.remote); in hfi1_build_tid_rdma_write_req()
3380 req->n_flows = remote->max_write; in hfi1_build_tid_rdma_write_req()
3381 req->state = TID_REQUEST_ACTIVE; in hfi1_build_tid_rdma_write_req()
3383 KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth0, KVER, 0x1); in hfi1_build_tid_rdma_write_req()
3384 KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth1, JKEY, remote->jkey); in hfi1_build_tid_rdma_write_req()
3385 ohdr->u.tid_rdma.w_req.reth.vaddr = in hfi1_build_tid_rdma_write_req()
3386 cpu_to_be64(wqe->rdma_wr.remote_addr + (wqe->length - *len)); in hfi1_build_tid_rdma_write_req()
3387 ohdr->u.tid_rdma.w_req.reth.rkey = in hfi1_build_tid_rdma_write_req()
3388 cpu_to_be32(wqe->rdma_wr.rkey); in hfi1_build_tid_rdma_write_req()
3389 ohdr->u.tid_rdma.w_req.reth.length = cpu_to_be32(*len); in hfi1_build_tid_rdma_write_req()
3390 ohdr->u.tid_rdma.w_req.verbs_qp = cpu_to_be32(qp->remote_qpn); in hfi1_build_tid_rdma_write_req()
3392 *bth1 |= remote->qp; in hfi1_build_tid_rdma_write_req()
3393 qp->s_state = TID_OP(WRITE_REQ); in hfi1_build_tid_rdma_write_req()
3394 qp->s_flags |= HFI1_S_WAIT_TID_RESP; in hfi1_build_tid_rdma_write_req()
3399 return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32); in hfi1_build_tid_rdma_write_req()
3412 return (MAX_TID_FLOW_PSN * qp->pmtu) >> TID_RDMA_SEGMENT_SHIFT; in hfi1_compute_tid_rdma_flow_wt()
3418 return qpriv->tid_enqueue - queue->dequeue; in position_in_queue()
3428 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_compute_tid_rnr_timeout()
3433 bytes_per_us = active_egress_rate(qpriv->rcd->ppd) / 8; in hfi1_compute_tid_rnr_timeout()
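/*
 * Illustrative sketch (standalone, not driver code): when the responder
 * cannot allocate TID resources it answers with an RNR NAK whose timeout is
 * scaled to how far back in the queue the request sits and how fast the
 * link can drain the work ahead of it.  A rough standalone estimate of that
 * wait, assuming the egress rate is reported in Mbit/s (so rate/8 is bytes
 * per microsecond); the driver additionally rounds the result onto the
 * discrete IB RNR timer encodings, which is not shown here.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t rnr_wait_us(uint32_t segs_ahead, uint32_t seg_bytes,
			    uint32_t rate_mbps)
{
	uint32_t bytes_per_us = rate_mbps / 8;

	if (!bytes_per_us)
		return 1;
	return ((uint64_t)segs_ahead * seg_bytes) / bytes_per_us;
}

int main(void)
{
	/* 3 segments of 256 KiB ahead on a 100 Gb/s link -> ~62 us */
	printf("%llu us\n",
	       (unsigned long long)rnr_wait_us(3, 256 * 1024, 100000));
	return 0;
}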
3452 * themselves to allocate resources for up to local->max_write
3460 * [request: qpriv->r_tid_alloc, segment: req->alloc_seg]
3462 * [request: qp->s_tail_ack_queue, segment: req->cur_seg]
3467 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_tid_write_alloc_resources()
3468 struct hfi1_ctxtdata *rcd = qpriv->rcd; in hfi1_tid_write_alloc_resources()
3469 struct tid_rdma_params *local = &qpriv->tid_rdma.local; in hfi1_tid_write_alloc_resources()
3475 lockdep_assert_held(&qp->s_lock); in hfi1_tid_write_alloc_resources()
3482 * scheduled to avoid messing up qp->r_psn: the RNR NAK will in hfi1_tid_write_alloc_resources()
3487 * RNR NAK packet, it will restart with qp->s_last_psn + 1, in hfi1_tid_write_alloc_resources()
3488 * which does not match qp->r_psn and will be dropped. in hfi1_tid_write_alloc_resources()
3492 if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND) in hfi1_tid_write_alloc_resources()
3496 if (qpriv->r_tid_alloc == qpriv->r_tid_head) { in hfi1_tid_write_alloc_resources()
3498 if (qpriv->flow_state.index < RXE_NUM_TID_FLOWS && in hfi1_tid_write_alloc_resources()
3499 !qpriv->alloc_w_segs) { in hfi1_tid_write_alloc_resources()
3501 qpriv->s_flags &= ~HFI1_R_TID_SW_PSN; in hfi1_tid_write_alloc_resources()
3506 e = &qp->s_ack_queue[qpriv->r_tid_alloc]; in hfi1_tid_write_alloc_resources()
3507 if (e->opcode != TID_OP(WRITE_REQ)) in hfi1_tid_write_alloc_resources()
3510 trace_hfi1_tid_req_write_alloc_res(qp, 0, e->opcode, e->psn, in hfi1_tid_write_alloc_resources()
3511 e->lpsn, req); in hfi1_tid_write_alloc_resources()
3513 if (req->alloc_seg >= req->total_segs) in hfi1_tid_write_alloc_resources()
3516 /* Can allocate only a maximum of local->max_write for a QP */ in hfi1_tid_write_alloc_resources()
3517 if (qpriv->alloc_w_segs >= local->max_write) in hfi1_tid_write_alloc_resources()
3521 if (qpriv->sync_pt && qpriv->alloc_w_segs) in hfi1_tid_write_alloc_resources()
3525 if (qpriv->sync_pt && !qpriv->alloc_w_segs) { in hfi1_tid_write_alloc_resources()
3527 qpriv->sync_pt = false; in hfi1_tid_write_alloc_resources()
3528 qpriv->s_flags &= ~HFI1_R_TID_SW_PSN; in hfi1_tid_write_alloc_resources()
3532 if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) { in hfi1_tid_write_alloc_resources()
3533 ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp); in hfi1_tid_write_alloc_resources()
3537 &rcd->flow_queue); in hfi1_tid_write_alloc_resources()
3542 npkts = rvt_div_round_up_mtu(qp, req->seg_len); in hfi1_tid_write_alloc_resources()
3548 if (qpriv->flow_state.psn + npkts > MAX_TID_FLOW_PSN - 1) { in hfi1_tid_write_alloc_resources()
3549 qpriv->sync_pt = true; in hfi1_tid_write_alloc_resources()
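/*
 * Illustrative sketch (standalone, not driver code): the check above marks a
 * "sync point" when the next segment's packets would overflow the flow's
 * per-generation KDETH sequence space, so the generation must be bumped
 * before more segments are scheduled.  Standalone version, assuming an
 * 11-bit sequence field (2048 PSNs per generation).
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_MAX_TID_FLOW_PSN (1u << 11)	/* assumed per-generation PSN space */

bool needs_sync_point(uint32_t flow_psn, uint32_t seg_len, uint32_t pmtu)
{
	uint32_t npkts = (seg_len + pmtu - 1) / pmtu;

	return flow_psn + npkts > EX_MAX_TID_FLOW_PSN - 1;
}
/* e.g. needs_sync_point(2000, 262144, 4096) -> true: 64 more packets would
 * run past PSN 2047, so wait for a generation change first. */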
3554 * If overtaking req->acked_tail, send an RNR NAK. Because the in hfi1_tid_write_alloc_resources()
3560 if (!CIRC_SPACE(req->setup_head, req->acked_tail, in hfi1_tid_write_alloc_resources()
3562 ret = -EAGAIN; in hfi1_tid_write_alloc_resources()
3569 ret = hfi1_kern_exp_rcv_setup(req, &req->ss, &last); in hfi1_tid_write_alloc_resources()
3570 if (ret == -EAGAIN) in hfi1_tid_write_alloc_resources()
3571 to_seg = position_in_queue(qpriv, &rcd->rarr_queue); in hfi1_tid_write_alloc_resources()
3575 qpriv->alloc_w_segs++; in hfi1_tid_write_alloc_resources()
3576 req->alloc_seg++; in hfi1_tid_write_alloc_resources()
3580 if (++qpriv->r_tid_alloc > in hfi1_tid_write_alloc_resources()
3581 rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) in hfi1_tid_write_alloc_resources()
3582 qpriv->r_tid_alloc = 0; in hfi1_tid_write_alloc_resources()
3590 if (ret == -EAGAIN && intr_ctx && !qp->r_nak_state) in hfi1_tid_write_alloc_resources()
3596 lockdep_assert_held(&qp->r_lock); in hfi1_tid_write_alloc_resources()
3599 qp->r_nak_state = hfi1_compute_tid_rnr_timeout(qp, to_seg) | IB_RNR_NAK; in hfi1_tid_write_alloc_resources()
3602 qp->r_psn = e->psn + req->alloc_seg; in hfi1_tid_write_alloc_resources()
3603 qp->r_ack_psn = qp->r_psn; in hfi1_tid_write_alloc_resources()
3609 qp->r_head_ack_queue = qpriv->r_tid_alloc + 1; in hfi1_tid_write_alloc_resources()
3610 if (qp->r_head_ack_queue > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) in hfi1_tid_write_alloc_resources()
3611 qp->r_head_ack_queue = 0; in hfi1_tid_write_alloc_resources()
3612 qpriv->r_tid_head = qp->r_head_ack_queue; in hfi1_tid_write_alloc_resources()
3615 * hfi1_send_rc_ack() but must be set here before dropping qp->s_lock in hfi1_tid_write_alloc_resources()
3618 qp->s_nak_state = qp->r_nak_state; in hfi1_tid_write_alloc_resources()
3619 qp->s_ack_psn = qp->r_ack_psn; in hfi1_tid_write_alloc_resources()
3622 * have modified qp->s_ack_psn here. in hfi1_tid_write_alloc_resources()
3624 qp->s_flags &= ~(RVT_S_ACK_PENDING); in hfi1_tid_write_alloc_resources()
3626 trace_hfi1_rsp_tid_write_alloc_res(qp, qp->r_psn); in hfi1_tid_write_alloc_resources()
3628 * qpriv->rnr_nak_state is used to determine when the scheduled RNR NAK in hfi1_tid_write_alloc_resources()
3629 * has actually been sent. qp->s_flags RVT_S_ACK_PENDING bit cannot be in hfi1_tid_write_alloc_resources()
3630 * used for this because qp->s_lock is dropped before calling in hfi1_tid_write_alloc_resources()
3634 qpriv->rnr_nak_state = TID_RNR_NAK_SEND; in hfi1_tid_write_alloc_resources()
3651 * - Don't allow 0-length requests. in hfi1_rc_rcv_tid_rdma_write_req()
3653 * - Setup struct tid_rdma_req with request info in hfi1_rc_rcv_tid_rdma_write_req()
3654 * - Prepare struct tid_rdma_flow array? in hfi1_rc_rcv_tid_rdma_write_req()
3655 * 3. Set the qp->s_ack_state as state diagram in design doc. in hfi1_rc_rcv_tid_rdma_write_req()
3659 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_rc_rcv_tid_rdma_write_req()
3660 struct rvt_qp *qp = packet->qp; in hfi1_rc_rcv_tid_rdma_write_req()
3661 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); in hfi1_rc_rcv_tid_rdma_write_req()
3662 struct ib_other_headers *ohdr = packet->ohdr; in hfi1_rc_rcv_tid_rdma_write_req()
3666 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_rc_rcv_tid_rdma_write_req()
3674 bth0 = be32_to_cpu(ohdr->bth[0]); in hfi1_rc_rcv_tid_rdma_write_req()
3679 psn = mask_psn(be32_to_cpu(ohdr->bth[2])); in hfi1_rc_rcv_tid_rdma_write_req()
3682 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) in hfi1_rc_rcv_tid_rdma_write_req()
3685 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) in hfi1_rc_rcv_tid_rdma_write_req()
3688 reth = &ohdr->u.tid_rdma.w_req.reth; in hfi1_rc_rcv_tid_rdma_write_req()
3689 vaddr = be64_to_cpu(reth->vaddr); in hfi1_rc_rcv_tid_rdma_write_req()
3690 len = be32_to_cpu(reth->length); in hfi1_rc_rcv_tid_rdma_write_req()
3692 num_segs = DIV_ROUND_UP(len, qpriv->tid_rdma.local.max_len); in hfi1_rc_rcv_tid_rdma_write_req()
3693 diff = delta_psn(psn, qp->r_psn); in hfi1_rc_rcv_tid_rdma_write_req()
3704 if (qpriv->rnr_nak_state) in hfi1_rc_rcv_tid_rdma_write_req()
3705 qp->r_head_ack_queue = qp->r_head_ack_queue ? in hfi1_rc_rcv_tid_rdma_write_req()
3706 qp->r_head_ack_queue - 1 : in hfi1_rc_rcv_tid_rdma_write_req()
3707 rvt_size_atomic(ib_to_rvt(qp->ibqp.device)); in hfi1_rc_rcv_tid_rdma_write_req()
3710 next = qp->r_head_ack_queue + 1; in hfi1_rc_rcv_tid_rdma_write_req()
3711 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) in hfi1_rc_rcv_tid_rdma_write_req()
3713 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_write_req()
3714 if (unlikely(next == qp->s_acked_ack_queue)) { in hfi1_rc_rcv_tid_rdma_write_req()
3715 if (!qp->s_ack_queue[next].sent) in hfi1_rc_rcv_tid_rdma_write_req()
3719 e = &qp->s_ack_queue[qp->r_head_ack_queue]; in hfi1_rc_rcv_tid_rdma_write_req()
3723 if (qpriv->rnr_nak_state) { in hfi1_rc_rcv_tid_rdma_write_req()
3724 qp->r_nak_state = 0; in hfi1_rc_rcv_tid_rdma_write_req()
3725 qp->s_nak_state = 0; in hfi1_rc_rcv_tid_rdma_write_req()
3726 qpriv->rnr_nak_state = TID_RNR_NAK_INIT; in hfi1_rc_rcv_tid_rdma_write_req()
3727 qp->r_psn = e->lpsn + 1; in hfi1_rc_rcv_tid_rdma_write_req()
3728 req->state = TID_REQUEST_INIT; in hfi1_rc_rcv_tid_rdma_write_req()
3738 rkey = be32_to_cpu(reth->rkey); in hfi1_rc_rcv_tid_rdma_write_req()
3739 qp->r_len = len; in hfi1_rc_rcv_tid_rdma_write_req()
3741 if (e->opcode == TID_OP(WRITE_REQ) && in hfi1_rc_rcv_tid_rdma_write_req()
3742 (req->setup_head != req->clear_tail || in hfi1_rc_rcv_tid_rdma_write_req()
3743 req->clear_tail != req->acked_tail)) in hfi1_rc_rcv_tid_rdma_write_req()
3746 if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr, in hfi1_rc_rcv_tid_rdma_write_req()
3750 qp->r_psn += num_segs - 1; in hfi1_rc_rcv_tid_rdma_write_req()
3752 e->opcode = (bth0 >> 24) & 0xff; in hfi1_rc_rcv_tid_rdma_write_req()
3753 e->psn = psn; in hfi1_rc_rcv_tid_rdma_write_req()
3754 e->lpsn = qp->r_psn; in hfi1_rc_rcv_tid_rdma_write_req()
3755 e->sent = 0; in hfi1_rc_rcv_tid_rdma_write_req()
3757 req->n_flows = min_t(u16, num_segs, qpriv->tid_rdma.local.max_write); in hfi1_rc_rcv_tid_rdma_write_req()
3758 req->state = TID_REQUEST_INIT; in hfi1_rc_rcv_tid_rdma_write_req()
3759 req->cur_seg = 0; in hfi1_rc_rcv_tid_rdma_write_req()
3760 req->comp_seg = 0; in hfi1_rc_rcv_tid_rdma_write_req()
3761 req->ack_seg = 0; in hfi1_rc_rcv_tid_rdma_write_req()
3762 req->alloc_seg = 0; in hfi1_rc_rcv_tid_rdma_write_req()
3763 req->isge = 0; in hfi1_rc_rcv_tid_rdma_write_req()
3764 req->seg_len = qpriv->tid_rdma.local.max_len; in hfi1_rc_rcv_tid_rdma_write_req()
3765 req->total_len = len; in hfi1_rc_rcv_tid_rdma_write_req()
3766 req->total_segs = num_segs; in hfi1_rc_rcv_tid_rdma_write_req()
3767 req->r_flow_psn = e->psn; in hfi1_rc_rcv_tid_rdma_write_req()
3768 req->ss.sge = e->rdma_sge; in hfi1_rc_rcv_tid_rdma_write_req()
3769 req->ss.num_sge = 1; in hfi1_rc_rcv_tid_rdma_write_req()
3771 req->flow_idx = req->setup_head; in hfi1_rc_rcv_tid_rdma_write_req()
3772 req->clear_tail = req->setup_head; in hfi1_rc_rcv_tid_rdma_write_req()
3773 req->acked_tail = req->setup_head; in hfi1_rc_rcv_tid_rdma_write_req()
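/*
 * Illustrative sketch (standalone, not driver code): each tid_rdma_request
 * keeps its segment flows in a small circular buffer whose indices
 * (setup_head, flow_idx, clear_tail, acked_tail) chase each other around the
 * ring; the initialization above starts them all at the same slot, and the
 * surrounding code manages them with CIRC_SPACE()/CIRC_CNT()/CIRC_NEXT().
 * A standalone look at that circ_buf arithmetic, assuming a power-of-two
 * ring size (4 here, purely illustrative).
 */
#include <stdio.h>

#define EX_RING_SIZE 4u	/* must be a power of two for these macros */
#define EX_CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define EX_CIRC_SPACE(head, tail, size) EX_CIRC_CNT((tail), (head) + 1, (size))
#define EX_CIRC_NEXT(val, size)         (((val) + 1) & ((size) - 1))

int main(void)
{
	unsigned int setup_head = 0, acked_tail = 0;

	/* program three segments */
	for (int i = 0; i < 3; i++)
		setup_head = EX_CIRC_NEXT(setup_head, EX_RING_SIZE);

	/* circ_buf always keeps one slot unused, so 3 in flight -> 0 free */
	printf("in flight: %u, free slots: %u\n",
	       EX_CIRC_CNT(setup_head, acked_tail, EX_RING_SIZE),
	       EX_CIRC_SPACE(setup_head, acked_tail, EX_RING_SIZE));

	/* one segment acked */
	acked_tail = EX_CIRC_NEXT(acked_tail, EX_RING_SIZE);
	printf("in flight: %u, free slots: %u\n",
	       EX_CIRC_CNT(setup_head, acked_tail, EX_RING_SIZE),
	       EX_CIRC_SPACE(setup_head, acked_tail, EX_RING_SIZE));
	return 0;
}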
3775 qp->r_state = e->opcode; in hfi1_rc_rcv_tid_rdma_write_req()
3776 qp->r_nak_state = 0; in hfi1_rc_rcv_tid_rdma_write_req()
3782 qp->r_msn++; in hfi1_rc_rcv_tid_rdma_write_req()
3783 qp->r_psn++; in hfi1_rc_rcv_tid_rdma_write_req()
3785 trace_hfi1_tid_req_rcv_write_req(qp, 0, e->opcode, e->psn, e->lpsn, in hfi1_rc_rcv_tid_rdma_write_req()
3788 if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID) { in hfi1_rc_rcv_tid_rdma_write_req()
3789 qpriv->r_tid_tail = qp->r_head_ack_queue; in hfi1_rc_rcv_tid_rdma_write_req()
3790 } else if (qpriv->r_tid_tail == qpriv->r_tid_head) { in hfi1_rc_rcv_tid_rdma_write_req()
3793 e = &qp->s_ack_queue[qpriv->r_tid_tail]; in hfi1_rc_rcv_tid_rdma_write_req()
3796 if (e->opcode != TID_OP(WRITE_REQ) || in hfi1_rc_rcv_tid_rdma_write_req()
3797 ptr->comp_seg == ptr->total_segs) { in hfi1_rc_rcv_tid_rdma_write_req()
3798 if (qpriv->r_tid_tail == qpriv->r_tid_ack) in hfi1_rc_rcv_tid_rdma_write_req()
3799 qpriv->r_tid_ack = qp->r_head_ack_queue; in hfi1_rc_rcv_tid_rdma_write_req()
3800 qpriv->r_tid_tail = qp->r_head_ack_queue; in hfi1_rc_rcv_tid_rdma_write_req()
3804 qp->r_head_ack_queue = next; in hfi1_rc_rcv_tid_rdma_write_req()
3805 qpriv->r_tid_head = qp->r_head_ack_queue; in hfi1_rc_rcv_tid_rdma_write_req()
3811 qp->s_flags |= RVT_S_RESP_PENDING; in hfi1_rc_rcv_tid_rdma_write_req()
3813 qp->s_flags |= RVT_S_ECN; in hfi1_rc_rcv_tid_rdma_write_req()
3816 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_write_req()
3820 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_write_req()
3823 qp->r_nak_state = IB_NAK_INVALID_REQUEST; in hfi1_rc_rcv_tid_rdma_write_req()
3824 qp->r_ack_psn = qp->r_psn; in hfi1_rc_rcv_tid_rdma_write_req()
3829 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_write_req()
3831 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; in hfi1_rc_rcv_tid_rdma_write_req()
3832 qp->r_ack_psn = qp->r_psn; in hfi1_rc_rcv_tid_rdma_write_req()
3840 struct hfi1_ack_priv *epriv = e->priv; in hfi1_build_tid_rdma_write_resp()
3841 struct tid_rdma_request *req = &epriv->tid_req; in hfi1_build_tid_rdma_write_resp()
3842 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_build_tid_rdma_write_resp()
3848 trace_hfi1_tid_req_build_write_resp(qp, 0, e->opcode, e->psn, e->lpsn, in hfi1_build_tid_rdma_write_resp()
3852 flow = &req->flows[req->flow_idx]; in hfi1_build_tid_rdma_write_resp()
3853 switch (req->state) { in hfi1_build_tid_rdma_write_resp()
3862 if (req->cur_seg >= req->alloc_seg) in hfi1_build_tid_rdma_write_resp()
3869 if (qpriv->rnr_nak_state == TID_RNR_NAK_SENT) in hfi1_build_tid_rdma_write_resp()
3872 req->state = TID_REQUEST_ACTIVE; in hfi1_build_tid_rdma_write_resp()
3873 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow); in hfi1_build_tid_rdma_write_resp()
3874 req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS); in hfi1_build_tid_rdma_write_resp()
3880 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow); in hfi1_build_tid_rdma_write_resp()
3881 req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS); in hfi1_build_tid_rdma_write_resp()
3882 if (!CIRC_CNT(req->setup_head, req->flow_idx, MAX_FLOWS)) in hfi1_build_tid_rdma_write_resp()
3883 req->state = TID_REQUEST_ACTIVE; in hfi1_build_tid_rdma_write_resp()
3888 flow->flow_state.resp_ib_psn = bth2; in hfi1_build_tid_rdma_write_resp()
3889 resp_addr = (void *)flow->tid_entry; in hfi1_build_tid_rdma_write_resp()
3890 resp_len = sizeof(*flow->tid_entry) * flow->tidcnt; in hfi1_build_tid_rdma_write_resp()
3891 req->cur_seg++; in hfi1_build_tid_rdma_write_resp()
3893 memset(&ohdr->u.tid_rdma.w_rsp, 0, sizeof(ohdr->u.tid_rdma.w_rsp)); in hfi1_build_tid_rdma_write_resp()
3894 epriv->ss.sge.vaddr = resp_addr; in hfi1_build_tid_rdma_write_resp()
3895 epriv->ss.sge.sge_length = resp_len; in hfi1_build_tid_rdma_write_resp()
3896 epriv->ss.sge.length = epriv->ss.sge.sge_length; in hfi1_build_tid_rdma_write_resp()
3901 epriv->ss.sge.mr = NULL; in hfi1_build_tid_rdma_write_resp()
3902 epriv->ss.sge.m = 0; in hfi1_build_tid_rdma_write_resp()
3903 epriv->ss.sge.n = 0; in hfi1_build_tid_rdma_write_resp()
3905 epriv->ss.sg_list = NULL; in hfi1_build_tid_rdma_write_resp()
3906 epriv->ss.total_len = epriv->ss.sge.sge_length; in hfi1_build_tid_rdma_write_resp()
3907 epriv->ss.num_sge = 1; in hfi1_build_tid_rdma_write_resp()
3909 *ss = &epriv->ss; in hfi1_build_tid_rdma_write_resp()
3910 *len = epriv->ss.total_len; in hfi1_build_tid_rdma_write_resp()
3914 remote = rcu_dereference(qpriv->tid_rdma.remote); in hfi1_build_tid_rdma_write_resp()
3916 KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth0, KVER, 0x1); in hfi1_build_tid_rdma_write_resp()
3917 KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth1, JKEY, remote->jkey); in hfi1_build_tid_rdma_write_resp()
3918 ohdr->u.tid_rdma.w_rsp.aeth = rvt_compute_aeth(qp); in hfi1_build_tid_rdma_write_resp()
3919 ohdr->u.tid_rdma.w_rsp.tid_flow_psn = in hfi1_build_tid_rdma_write_resp()
3920 cpu_to_be32((flow->flow_state.generation << in hfi1_build_tid_rdma_write_resp()
3922 (flow->flow_state.spsn & in hfi1_build_tid_rdma_write_resp()
3924 ohdr->u.tid_rdma.w_rsp.tid_flow_qp = in hfi1_build_tid_rdma_write_resp()
3925 cpu_to_be32(qpriv->tid_rdma.local.qp | in hfi1_build_tid_rdma_write_resp()
3926 ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) << in hfi1_build_tid_rdma_write_resp()
3928 qpriv->rcd->ctxt); in hfi1_build_tid_rdma_write_resp()
3929 ohdr->u.tid_rdma.w_rsp.verbs_qp = cpu_to_be32(qp->remote_qpn); in hfi1_build_tid_rdma_write_resp()
3930 *bth1 = remote->qp; in hfi1_build_tid_rdma_write_resp()
3932 hdwords = sizeof(ohdr->u.tid_rdma.w_rsp) / sizeof(u32); in hfi1_build_tid_rdma_write_resp()
3933 qpriv->pending_tid_w_segs++; in hfi1_build_tid_rdma_write_resp()
3940 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_add_tid_reap_timer()
3942 lockdep_assert_held(&qp->s_lock); in hfi1_add_tid_reap_timer()
3943 if (!(qpriv->s_flags & HFI1_R_TID_RSC_TIMER)) { in hfi1_add_tid_reap_timer()
3944 qpriv->s_flags |= HFI1_R_TID_RSC_TIMER; in hfi1_add_tid_reap_timer()
3945 qpriv->s_tid_timer.expires = jiffies + in hfi1_add_tid_reap_timer()
3946 qpriv->tid_timer_timeout_jiffies; in hfi1_add_tid_reap_timer()
3947 add_timer(&qpriv->s_tid_timer); in hfi1_add_tid_reap_timer()
3953 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_mod_tid_reap_timer()
3955 lockdep_assert_held(&qp->s_lock); in hfi1_mod_tid_reap_timer()
3956 qpriv->s_flags |= HFI1_R_TID_RSC_TIMER; in hfi1_mod_tid_reap_timer()
3957 mod_timer(&qpriv->s_tid_timer, jiffies + in hfi1_mod_tid_reap_timer()
3958 qpriv->tid_timer_timeout_jiffies); in hfi1_mod_tid_reap_timer()
3963 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_stop_tid_reap_timer()
3966 lockdep_assert_held(&qp->s_lock); in hfi1_stop_tid_reap_timer()
3967 if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) { in hfi1_stop_tid_reap_timer()
3968 rval = del_timer(&qpriv->s_tid_timer); in hfi1_stop_tid_reap_timer()
3969 qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER; in hfi1_stop_tid_reap_timer()
3976 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_del_tid_reap_timer()
3978 del_timer_sync(&qpriv->s_tid_timer); in hfi1_del_tid_reap_timer()
3979 qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER; in hfi1_del_tid_reap_timer()
3985 struct rvt_qp *qp = qpriv->owner; in hfi1_tid_timeout()
3986 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); in hfi1_tid_timeout()
3990 spin_lock_irqsave(&qp->r_lock, flags); in hfi1_tid_timeout()
3991 spin_lock(&qp->s_lock); in hfi1_tid_timeout()
3992 if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) { in hfi1_tid_timeout()
3993 dd_dev_warn(dd_from_ibdev(qp->ibqp.device), "[QP%u] %s %d\n", in hfi1_tid_timeout()
3994 qp->ibqp.qp_num, __func__, __LINE__); in hfi1_tid_timeout()
3997 (u64)qpriv->tid_timer_timeout_jiffies); in hfi1_tid_timeout()
4003 hfi1_kern_clear_hw_flow(qpriv->rcd, qp); in hfi1_tid_timeout()
4006 ack_to_tid_req(&qp->s_ack_queue[i]); in hfi1_tid_timeout()
4010 spin_unlock(&qp->s_lock); in hfi1_tid_timeout()
4011 if (qp->ibqp.event_handler) { in hfi1_tid_timeout()
4014 ev.device = qp->ibqp.device; in hfi1_tid_timeout()
4015 ev.element.qp = &qp->ibqp; in hfi1_tid_timeout()
4017 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); in hfi1_tid_timeout()
4022 spin_unlock(&qp->s_lock); in hfi1_tid_timeout()
4024 spin_unlock_irqrestore(&qp->r_lock, flags); in hfi1_tid_timeout()
4037 * 5. Set qp->s_state in hfi1_rc_rcv_tid_rdma_write_resp()
4040 struct ib_other_headers *ohdr = packet->ohdr; in hfi1_rc_rcv_tid_rdma_write_resp()
4041 struct rvt_qp *qp = packet->qp; in hfi1_rc_rcv_tid_rdma_write_resp()
4042 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_rc_rcv_tid_rdma_write_resp()
4043 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_rc_rcv_tid_rdma_write_resp()
4053 psn = mask_psn(be32_to_cpu(ohdr->bth[2])); in hfi1_rc_rcv_tid_rdma_write_resp()
4054 aeth = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.aeth); in hfi1_rc_rcv_tid_rdma_write_resp()
4055 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; in hfi1_rc_rcv_tid_rdma_write_resp()
4057 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_write_resp()
4060 if (cmp_psn(psn, qp->s_next_psn) >= 0) in hfi1_rc_rcv_tid_rdma_write_resp()
4064 if (unlikely(cmp_psn(psn, qp->s_last_psn) <= 0)) in hfi1_rc_rcv_tid_rdma_write_resp()
4067 if (unlikely(qp->s_acked == qp->s_tail)) in hfi1_rc_rcv_tid_rdma_write_resp()
4075 if (qp->r_flags & RVT_R_RDMAR_SEQ) { in hfi1_rc_rcv_tid_rdma_write_resp()
4076 if (cmp_psn(psn, qp->s_last_psn + 1) != 0) in hfi1_rc_rcv_tid_rdma_write_resp()
4078 qp->r_flags &= ~RVT_R_RDMAR_SEQ; in hfi1_rc_rcv_tid_rdma_write_resp()
4081 wqe = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur); in hfi1_rc_rcv_tid_rdma_write_resp()
4082 if (unlikely(wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)) in hfi1_rc_rcv_tid_rdma_write_resp()
4091 if (!CIRC_SPACE(req->setup_head, req->acked_tail, MAX_FLOWS)) in hfi1_rc_rcv_tid_rdma_write_resp()
4106 flow = &req->flows[req->setup_head]; in hfi1_rc_rcv_tid_rdma_write_resp()
4107 flow->pkt = 0; in hfi1_rc_rcv_tid_rdma_write_resp()
4108 flow->tid_idx = 0; in hfi1_rc_rcv_tid_rdma_write_resp()
4109 flow->tid_offset = 0; in hfi1_rc_rcv_tid_rdma_write_resp()
4110 flow->sent = 0; in hfi1_rc_rcv_tid_rdma_write_resp()
4111 flow->resync_npkts = 0; in hfi1_rc_rcv_tid_rdma_write_resp()
4112 flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_qp); in hfi1_rc_rcv_tid_rdma_write_resp()
4113 flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) & in hfi1_rc_rcv_tid_rdma_write_resp()
4115 flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_psn)); in hfi1_rc_rcv_tid_rdma_write_resp()
4116 flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT; in hfi1_rc_rcv_tid_rdma_write_resp()
4117 flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK; in hfi1_rc_rcv_tid_rdma_write_resp()
4118 flow->flow_state.resp_ib_psn = psn; in hfi1_rc_rcv_tid_rdma_write_resp()
4119 flow->length = min_t(u32, req->seg_len, in hfi1_rc_rcv_tid_rdma_write_resp()
4120 (wqe->length - (req->comp_seg * req->seg_len))); in hfi1_rc_rcv_tid_rdma_write_resp()
4122 flow->npkts = rvt_div_round_up_mtu(qp, flow->length); in hfi1_rc_rcv_tid_rdma_write_resp()
4123 flow->flow_state.lpsn = flow->flow_state.spsn + in hfi1_rc_rcv_tid_rdma_write_resp()
4124 flow->npkts - 1; in hfi1_rc_rcv_tid_rdma_write_resp()
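/*
 * Illustrative sketch (standalone, not driver code): the response handler
 * above splits the advertised TID flow PSN into a generation (upper bits)
 * and a starting sequence number (low bits), then derives the last PSN of
 * the segment from the packet count.  Standalone version, assuming a 24-bit
 * PSN with an 11-bit KDETH sequence field.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PSN_MASK	0xFFFFFFu	/* 24-bit PSN space		  */
#define EX_SEQ_SHIFT	11		/* assumed KDETH sequence width	  */
#define EX_SEQ_MASK	((1u << EX_SEQ_SHIFT) - 1)

int main(void)
{
	uint32_t flow_psn = 0x003812u & EX_PSN_MASK;	/* from the response  */
	uint32_t npkts = 64;				/* packets in segment */

	uint32_t generation = flow_psn >> EX_SEQ_SHIFT;
	uint32_t spsn = flow_psn & EX_SEQ_MASK;
	uint32_t lpsn = spsn + npkts - 1;

	/* full_flow_psn()-style recombination of the last packet's PSN */
	uint32_t full_lpsn = ((generation << EX_SEQ_SHIFT) |
			      (lpsn & EX_SEQ_MASK)) & EX_PSN_MASK;

	printf("gen=%u spsn=%u lpsn=%u full=0x%06x\n",
	       generation, spsn, lpsn, full_lpsn);
	return 0;
}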
4125 /* payload length = packet length - (header length + ICRC length) */ in hfi1_rc_rcv_tid_rdma_write_resp()
4126 pktlen = packet->tlen - (packet->hlen + 4); in hfi1_rc_rcv_tid_rdma_write_resp()
4127 if (pktlen > sizeof(flow->tid_entry)) { in hfi1_rc_rcv_tid_rdma_write_resp()
4131 memcpy(flow->tid_entry, packet->ebuf, pktlen); in hfi1_rc_rcv_tid_rdma_write_resp()
4132 flow->tidcnt = pktlen / sizeof(*flow->tid_entry); in hfi1_rc_rcv_tid_rdma_write_resp()
4133 trace_hfi1_tid_flow_rcv_write_resp(qp, req->setup_head, flow); in hfi1_rc_rcv_tid_rdma_write_resp()
4135 req->comp_seg++; in hfi1_rc_rcv_tid_rdma_write_resp()
4141 for (i = 0; i < flow->tidcnt; i++) { in hfi1_rc_rcv_tid_rdma_write_resp()
4143 qp, i, flow->tid_entry[i]); in hfi1_rc_rcv_tid_rdma_write_resp()
4144 if (!EXP_TID_GET(flow->tid_entry[i], LEN)) { in hfi1_rc_rcv_tid_rdma_write_resp()
4148 tidlen += EXP_TID_GET(flow->tid_entry[i], LEN); in hfi1_rc_rcv_tid_rdma_write_resp()
4150 if (tidlen * PAGE_SIZE < flow->length) { in hfi1_rc_rcv_tid_rdma_write_resp()
4155 trace_hfi1_tid_req_rcv_write_resp(qp, 0, wqe->wr.opcode, wqe->psn, in hfi1_rc_rcv_tid_rdma_write_resp()
4156 wqe->lpsn, req); in hfi1_rc_rcv_tid_rdma_write_resp()
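/*
 * Illustrative sketch (standalone, not driver code): the loop above
 * sanity-checks the TID entries carried in the WRITE RESP payload -- every
 * entry must map at least one page, and together they must cover the whole
 * segment.  A standalone version that takes the per-entry page counts
 * directly (the real code extracts them from the packed 32-bit TID
 * entries); 4 KiB pages assumed.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_PAGE_SIZE 4096u

bool tid_entries_cover(const uint32_t *len_pages, uint32_t tidcnt,
		       uint32_t seg_length)
{
	uint64_t tidlen = 0;

	for (uint32_t i = 0; i < tidcnt; i++) {
		if (!len_pages[i])		/* zero-length entry: bogus */
			return false;
		tidlen += len_pages[i];
	}
	return tidlen * EX_PAGE_SIZE >= seg_length;	/* must span segment */
}
/* e.g. entries of {32, 32} pages cover a 256 KiB segment:
 *      tid_entries_cover((uint32_t[]){32, 32}, 2, 256 * 1024) -> true */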
4161 if (!cmp_psn(psn, wqe->psn)) { in hfi1_rc_rcv_tid_rdma_write_resp()
4162 req->r_last_acked = mask_psn(wqe->psn - 1); in hfi1_rc_rcv_tid_rdma_write_resp()
4164 req->acked_tail = req->setup_head; in hfi1_rc_rcv_tid_rdma_write_resp()
4168 req->setup_head = CIRC_NEXT(req->setup_head, MAX_FLOWS); in hfi1_rc_rcv_tid_rdma_write_resp()
4169 req->state = TID_REQUEST_ACTIVE; in hfi1_rc_rcv_tid_rdma_write_resp()
4174 * Since TID RDMA requests could be mixed in with regular IB requests, in hfi1_rc_rcv_tid_rdma_write_resp()
4178 if (qpriv->s_tid_cur != qpriv->s_tid_head && in hfi1_rc_rcv_tid_rdma_write_resp()
4179 req->comp_seg == req->total_segs) { in hfi1_rc_rcv_tid_rdma_write_resp()
4180 for (i = qpriv->s_tid_cur + 1; ; i++) { in hfi1_rc_rcv_tid_rdma_write_resp()
4181 if (i == qp->s_size) in hfi1_rc_rcv_tid_rdma_write_resp()
4184 if (i == qpriv->s_tid_head) in hfi1_rc_rcv_tid_rdma_write_resp()
4186 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) in hfi1_rc_rcv_tid_rdma_write_resp()
4189 qpriv->s_tid_cur = i; in hfi1_rc_rcv_tid_rdma_write_resp()
4191 qp->s_flags &= ~HFI1_S_WAIT_TID_RESP; in hfi1_rc_rcv_tid_rdma_write_resp()
4201 qp->s_flags |= RVT_S_ECN; in hfi1_rc_rcv_tid_rdma_write_resp()
4202 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_write_resp()
4210 struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; in hfi1_build_tid_rdma_packet()
4212 struct rvt_qp *qp = req->qp; in hfi1_build_tid_rdma_packet()
4213 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_build_tid_rdma_packet()
4214 u32 tidentry = flow->tid_entry[flow->tid_idx]; in hfi1_build_tid_rdma_packet()
4216 struct tid_rdma_write_data *wd = &ohdr->u.tid_rdma.w_data; in hfi1_build_tid_rdma_packet()
4225 *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset); in hfi1_build_tid_rdma_packet()
4226 flow->sent += *len; in hfi1_build_tid_rdma_packet()
4227 next_offset = flow->tid_offset + *len; in hfi1_build_tid_rdma_packet()
4228 last_pkt = (flow->tid_idx == (flow->tidcnt - 1) && in hfi1_build_tid_rdma_packet()
4229 next_offset >= tidlen) || (flow->sent >= flow->length); in hfi1_build_tid_rdma_packet()
4230 trace_hfi1_tid_entry_build_write_data(qp, flow->tid_idx, tidentry); in hfi1_build_tid_rdma_packet()
4231 trace_hfi1_tid_flow_build_write_data(qp, req->clear_tail, flow); in hfi1_build_tid_rdma_packet()
4234 remote = rcu_dereference(qpriv->tid_rdma.remote); in hfi1_build_tid_rdma_packet()
4235 KDETH_RESET(wd->kdeth0, KVER, 0x1); in hfi1_build_tid_rdma_packet()
4236 KDETH_SET(wd->kdeth0, SH, !last_pkt); in hfi1_build_tid_rdma_packet()
4237 KDETH_SET(wd->kdeth0, INTR, !!(!last_pkt && remote->urg)); in hfi1_build_tid_rdma_packet()
4238 KDETH_SET(wd->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL)); in hfi1_build_tid_rdma_packet()
4239 KDETH_SET(wd->kdeth0, TID, EXP_TID_GET(tidentry, IDX)); in hfi1_build_tid_rdma_packet()
4240 KDETH_SET(wd->kdeth0, OM, om == KDETH_OM_LARGE); in hfi1_build_tid_rdma_packet()
4241 KDETH_SET(wd->kdeth0, OFFSET, flow->tid_offset / om); in hfi1_build_tid_rdma_packet()
4242 KDETH_RESET(wd->kdeth1, JKEY, remote->jkey); in hfi1_build_tid_rdma_packet()
4243 wd->verbs_qp = cpu_to_be32(qp->remote_qpn); in hfi1_build_tid_rdma_packet()
4246 *bth1 = flow->tid_qpn; in hfi1_build_tid_rdma_packet()
4247 *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) & in hfi1_build_tid_rdma_packet()
4249 (flow->flow_state.generation << in hfi1_build_tid_rdma_packet()
4252 /* PSNs are zero-based, so +1 to count number of packets */ in hfi1_build_tid_rdma_packet()
4253 if (flow->flow_state.lpsn + 1 + in hfi1_build_tid_rdma_packet()
4254 rvt_div_round_up_mtu(qp, req->seg_len) > in hfi1_build_tid_rdma_packet()
4256 req->state = TID_REQUEST_SYNC; in hfi1_build_tid_rdma_packet()
4261 flow->tid_offset = 0; in hfi1_build_tid_rdma_packet()
4262 flow->tid_idx++; in hfi1_build_tid_rdma_packet()
4264 flow->tid_offset = next_offset; in hfi1_build_tid_rdma_packet()
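/*
 * Illustrative sketch (standalone, not driver code): each data packet's BTH
 * PSN is built above from the flow's generation and its starting sequence
 * number plus a running packet counter; since PSNs are zero-based, a flow
 * covering spsn..lpsn carries lpsn - spsn + 1 packets.  Standalone
 * illustration using the same assumed 24-bit/11-bit layout as the earlier
 * sketches.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PSN_MASK	0xFFFFFFu
#define EX_SEQ_SHIFT	11
#define EX_SEQ_MASK	((1u << EX_SEQ_SHIFT) - 1)

static uint32_t data_psn(uint32_t generation, uint32_t spsn, uint32_t pkt)
{
	return (((spsn + pkt) & EX_SEQ_MASK) | (generation << EX_SEQ_SHIFT))
		& EX_PSN_MASK;
}

int main(void)
{
	uint32_t gen = 7, spsn = 18, lpsn = 81;

	printf("first=0x%06x last=0x%06x packets=%u\n",
	       data_psn(gen, spsn, 0),
	       data_psn(gen, spsn, lpsn - spsn),
	       lpsn - spsn + 1);	/* zero-based PSNs: +1 for the count */
	return 0;
}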
4271 struct rvt_qp *qp = packet->qp; in hfi1_rc_rcv_tid_rdma_write_data()
4272 struct hfi1_qp_priv *priv = qp->priv; in hfi1_rc_rcv_tid_rdma_write_data()
4273 struct hfi1_ctxtdata *rcd = priv->rcd; in hfi1_rc_rcv_tid_rdma_write_data()
4274 struct ib_other_headers *ohdr = packet->ohdr; in hfi1_rc_rcv_tid_rdma_write_data()
4278 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); in hfi1_rc_rcv_tid_rdma_write_data()
4285 psn = mask_psn(be32_to_cpu(ohdr->bth[2])); in hfi1_rc_rcv_tid_rdma_write_data()
4286 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; in hfi1_rc_rcv_tid_rdma_write_data()
4292 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_write_data()
4293 e = &qp->s_ack_queue[priv->r_tid_tail]; in hfi1_rc_rcv_tid_rdma_write_data()
4295 flow = &req->flows[req->clear_tail]; in hfi1_rc_rcv_tid_rdma_write_data()
4296 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.lpsn))) { in hfi1_rc_rcv_tid_rdma_write_data()
4299 if (cmp_psn(psn, flow->flow_state.r_next_psn)) in hfi1_rc_rcv_tid_rdma_write_data()
4302 flow->flow_state.r_next_psn = mask_psn(psn + 1); in hfi1_rc_rcv_tid_rdma_write_data()
4310 if (fecn && packet->etype == RHF_RCV_TYPE_EAGER) { in hfi1_rc_rcv_tid_rdma_write_data()
4313 u32 tlen = packet->tlen; in hfi1_rc_rcv_tid_rdma_write_data()
4314 u16 hdrsize = packet->hlen; in hfi1_rc_rcv_tid_rdma_write_data()
4315 u8 pad = packet->pad; in hfi1_rc_rcv_tid_rdma_write_data()
4316 u8 extra_bytes = pad + packet->extra_byte + in hfi1_rc_rcv_tid_rdma_write_data()
4318 u32 pmtu = qp->pmtu; in hfi1_rc_rcv_tid_rdma_write_data()
4322 len = req->comp_seg * req->seg_len; in hfi1_rc_rcv_tid_rdma_write_data()
4324 full_flow_psn(flow, flow->flow_state.spsn)) * in hfi1_rc_rcv_tid_rdma_write_data()
4326 if (unlikely(req->total_len - len < pmtu)) in hfi1_rc_rcv_tid_rdma_write_data()
4330 * The e->rdma_sge field is set when TID RDMA WRITE REQ in hfi1_rc_rcv_tid_rdma_write_data()
4333 ss.sge = e->rdma_sge; in hfi1_rc_rcv_tid_rdma_write_data()
4336 ss.total_len = req->total_len; in hfi1_rc_rcv_tid_rdma_write_data()
4338 rvt_copy_sge(qp, &ss, packet->payload, pmtu, false, in hfi1_rc_rcv_tid_rdma_write_data()
4341 priv->r_next_psn_kdeth = mask_psn(psn + 1); in hfi1_rc_rcv_tid_rdma_write_data()
4342 priv->s_flags |= HFI1_R_TID_SW_PSN; in hfi1_rc_rcv_tid_rdma_write_data()
4346 flow->flow_state.r_next_psn = mask_psn(psn + 1); in hfi1_rc_rcv_tid_rdma_write_data()
4348 priv->alloc_w_segs--; in hfi1_rc_rcv_tid_rdma_write_data()
4349 rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK; in hfi1_rc_rcv_tid_rdma_write_data()
4350 req->comp_seg++; in hfi1_rc_rcv_tid_rdma_write_data()
4351 priv->s_nak_state = 0; in hfi1_rc_rcv_tid_rdma_write_data()
4355 * - The request has reached a sync point AND all outstanding in hfi1_rc_rcv_tid_rdma_write_data()
4357 * - The entire request is complete and there are no more requests in hfi1_rc_rcv_tid_rdma_write_data()
4361 trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn, in hfi1_rc_rcv_tid_rdma_write_data()
4368 for (next = priv->r_tid_tail + 1; ; next++) { in hfi1_rc_rcv_tid_rdma_write_data()
4369 if (next > rvt_size_atomic(&dev->rdi)) in hfi1_rc_rcv_tid_rdma_write_data()
4371 if (next == priv->r_tid_head) in hfi1_rc_rcv_tid_rdma_write_data()
4373 e = &qp->s_ack_queue[next]; in hfi1_rc_rcv_tid_rdma_write_data()
4374 if (e->opcode == TID_OP(WRITE_REQ)) in hfi1_rc_rcv_tid_rdma_write_data()
4377 priv->r_tid_tail = next; in hfi1_rc_rcv_tid_rdma_write_data()
4378 if (++qp->s_acked_ack_queue > rvt_size_atomic(&dev->rdi)) in hfi1_rc_rcv_tid_rdma_write_data()
4379 qp->s_acked_ack_queue = 0; in hfi1_rc_rcv_tid_rdma_write_data()
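/*
 * Illustrative sketch (standalone, not driver code): after the last data
 * packet of a request, the receive side above walks the ack queue forward
 * looking for the next TID RDMA WRITE REQ entry, wrapping at the end of the
 * queue and giving up if it reaches the head.  Standalone version of that
 * scan; the queue size and opcode value are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_QUEUE_SLOTS	5	/* stands in for rvt_size_atomic() + 1 */
#define EX_OP_WRITE_REQ	0xE0	/* placeholder opcode */

static int next_write_req(const uint8_t *opcode, unsigned int tail,
			  unsigned int head)
{
	for (unsigned int next = tail + 1; ; next++) {
		if (next >= EX_QUEUE_SLOTS)	/* wrap around the ring */
			next = 0;
		if (next == head)		/* nothing further queued */
			return -1;
		if (opcode[next] == EX_OP_WRITE_REQ)
			return (int)next;
	}
}

int main(void)
{
	uint8_t ops[EX_QUEUE_SLOTS] = { 0, EX_OP_WRITE_REQ, 0, 0, EX_OP_WRITE_REQ };

	printf("%d\n", next_write_req(ops, 1, 3));	/* hits head first -> -1 */
	printf("%d\n", next_write_req(ops, 4, 3));	/* wraps to 0, finds slot 1 */
	return 0;
}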
4388 if (req->cur_seg < req->total_segs || in hfi1_rc_rcv_tid_rdma_write_data()
4389 qp->s_tail_ack_queue != qp->r_head_ack_queue) { in hfi1_rc_rcv_tid_rdma_write_data()
4390 qp->s_flags |= RVT_S_RESP_PENDING; in hfi1_rc_rcv_tid_rdma_write_data()
4394 priv->pending_tid_w_segs--; in hfi1_rc_rcv_tid_rdma_write_data()
4395 if (priv->s_flags & HFI1_R_TID_RSC_TIMER) { in hfi1_rc_rcv_tid_rdma_write_data()
4396 if (priv->pending_tid_w_segs) in hfi1_rc_rcv_tid_rdma_write_data()
4397 hfi1_mod_tid_reap_timer(req->qp); in hfi1_rc_rcv_tid_rdma_write_data()
4399 hfi1_stop_tid_reap_timer(req->qp); in hfi1_rc_rcv_tid_rdma_write_data()
4405 priv->r_next_psn_kdeth = flow->flow_state.r_next_psn; in hfi1_rc_rcv_tid_rdma_write_data()
4407 qp->s_flags |= RVT_S_ECN; in hfi1_rc_rcv_tid_rdma_write_data()
4408 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_write_data()
4412 if (!priv->s_nak_state) { in hfi1_rc_rcv_tid_rdma_write_data()
4413 priv->s_nak_state = IB_NAK_PSN_ERROR; in hfi1_rc_rcv_tid_rdma_write_data()
4414 priv->s_nak_psn = flow->flow_state.r_next_psn; in hfi1_rc_rcv_tid_rdma_write_data()
4430 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_build_tid_rdma_write_ack()
4431 struct tid_flow_state *fs = &qpriv->flow_state; in hfi1_build_tid_rdma_write_ack()
4433 struct tid_rdma_flow *flow = &req->flows[iflow]; in hfi1_build_tid_rdma_write_ack()
4437 remote = rcu_dereference(qpriv->tid_rdma.remote); in hfi1_build_tid_rdma_write_ack()
4438 KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth1, JKEY, remote->jkey); in hfi1_build_tid_rdma_write_ack()
4439 ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn); in hfi1_build_tid_rdma_write_ack()
4440 *bth1 = remote->qp; in hfi1_build_tid_rdma_write_ack()
4443 if (qpriv->resync) { in hfi1_build_tid_rdma_write_ack()
4444 *bth2 = mask_psn((fs->generation << in hfi1_build_tid_rdma_write_ack()
4445 HFI1_KDETH_BTH_SEQ_SHIFT) - 1); in hfi1_build_tid_rdma_write_ack()
4446 ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp); in hfi1_build_tid_rdma_write_ack()
4447 } else if (qpriv->s_nak_state) { in hfi1_build_tid_rdma_write_ack()
4448 *bth2 = mask_psn(qpriv->s_nak_psn); in hfi1_build_tid_rdma_write_ack()
4449 ohdr->u.tid_rdma.ack.aeth = in hfi1_build_tid_rdma_write_ack()
4450 cpu_to_be32((qp->r_msn & IB_MSN_MASK) | in hfi1_build_tid_rdma_write_ack()
4451 (qpriv->s_nak_state << in hfi1_build_tid_rdma_write_ack()
4454 *bth2 = full_flow_psn(flow, flow->flow_state.lpsn); in hfi1_build_tid_rdma_write_ack()
4455 ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp); in hfi1_build_tid_rdma_write_ack()
4457 KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth0, KVER, 0x1); in hfi1_build_tid_rdma_write_ack()
4458 ohdr->u.tid_rdma.ack.tid_flow_qp = in hfi1_build_tid_rdma_write_ack()
4459 cpu_to_be32(qpriv->tid_rdma.local.qp | in hfi1_build_tid_rdma_write_ack()
4460 ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) << in hfi1_build_tid_rdma_write_ack()
4462 qpriv->rcd->ctxt); in hfi1_build_tid_rdma_write_ack()
4464 ohdr->u.tid_rdma.ack.tid_flow_psn = 0; in hfi1_build_tid_rdma_write_ack()
4465 ohdr->u.tid_rdma.ack.verbs_psn = in hfi1_build_tid_rdma_write_ack()
4466 cpu_to_be32(flow->flow_state.resp_ib_psn); in hfi1_build_tid_rdma_write_ack()
4468 if (qpriv->resync) { in hfi1_build_tid_rdma_write_ack()
4475 if (hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1)) { in hfi1_build_tid_rdma_write_ack()
4476 ohdr->u.tid_rdma.ack.tid_flow_psn = in hfi1_build_tid_rdma_write_ack()
4477 cpu_to_be32(qpriv->r_next_psn_kdeth_save); in hfi1_build_tid_rdma_write_ack()
4482 * of r_next_psn_kdeth in the case of back-to-back in hfi1_build_tid_rdma_write_ack()
4485 qpriv->r_next_psn_kdeth_save = in hfi1_build_tid_rdma_write_ack()
4486 qpriv->r_next_psn_kdeth - 1; in hfi1_build_tid_rdma_write_ack()
4487 ohdr->u.tid_rdma.ack.tid_flow_psn = in hfi1_build_tid_rdma_write_ack()
4488 cpu_to_be32(qpriv->r_next_psn_kdeth_save); in hfi1_build_tid_rdma_write_ack()
4489 qpriv->r_next_psn_kdeth = mask_psn(*bth2 + 1); in hfi1_build_tid_rdma_write_ack()
4491 qpriv->resync = false; in hfi1_build_tid_rdma_write_ack()
4494 return sizeof(ohdr->u.tid_rdma.ack) / sizeof(u32); in hfi1_build_tid_rdma_write_ack()
4499 struct ib_other_headers *ohdr = packet->ohdr; in hfi1_rc_rcv_tid_rdma_ack()
4500 struct rvt_qp *qp = packet->qp; in hfi1_rc_rcv_tid_rdma_ack()
4501 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_rc_rcv_tid_rdma_ack()
4511 psn = mask_psn(be32_to_cpu(ohdr->bth[2])); in hfi1_rc_rcv_tid_rdma_ack()
4512 aeth = be32_to_cpu(ohdr->u.tid_rdma.ack.aeth); in hfi1_rc_rcv_tid_rdma_ack()
4513 req_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.ack.verbs_psn)); in hfi1_rc_rcv_tid_rdma_ack()
4514 resync_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.ack.tid_flow_psn)); in hfi1_rc_rcv_tid_rdma_ack()
4516 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_ack()
4520 if ((qp->s_flags & HFI1_S_WAIT_HALT) && in hfi1_rc_rcv_tid_rdma_ack()
4521 cmp_psn(psn, qpriv->s_resync_psn)) in hfi1_rc_rcv_tid_rdma_ack()
4530 ack_psn--; in hfi1_rc_rcv_tid_rdma_ack()
4531 ack_kpsn--; in hfi1_rc_rcv_tid_rdma_ack()
4534 if (unlikely(qp->s_acked == qp->s_tail)) in hfi1_rc_rcv_tid_rdma_ack()
4537 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in hfi1_rc_rcv_tid_rdma_ack()
4539 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE) in hfi1_rc_rcv_tid_rdma_ack()
4543 trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn, in hfi1_rc_rcv_tid_rdma_ack()
4544 wqe->lpsn, req); in hfi1_rc_rcv_tid_rdma_ack()
4545 flow = &req->flows[req->acked_tail]; in hfi1_rc_rcv_tid_rdma_ack()
4546 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); in hfi1_rc_rcv_tid_rdma_ack()
4549 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 || in hfi1_rc_rcv_tid_rdma_ack()
4550 cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0) in hfi1_rc_rcv_tid_rdma_ack()
4554 full_flow_psn(flow, flow->flow_state.lpsn)) >= 0 && in hfi1_rc_rcv_tid_rdma_ack()
4555 req->ack_seg < req->cur_seg) { in hfi1_rc_rcv_tid_rdma_ack()
4556 req->ack_seg++; in hfi1_rc_rcv_tid_rdma_ack()
4558 req->acked_tail = CIRC_NEXT(req->acked_tail, MAX_FLOWS); in hfi1_rc_rcv_tid_rdma_ack()
4559 req->r_last_acked = flow->flow_state.resp_ib_psn; in hfi1_rc_rcv_tid_rdma_ack()
4560 trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn, in hfi1_rc_rcv_tid_rdma_ack()
4561 wqe->lpsn, req); in hfi1_rc_rcv_tid_rdma_ack()
4562 if (req->ack_seg == req->total_segs) { in hfi1_rc_rcv_tid_rdma_ack()
4563 req->state = TID_REQUEST_COMPLETE; in hfi1_rc_rcv_tid_rdma_ack()
4565 to_iport(qp->ibqp.device, in hfi1_rc_rcv_tid_rdma_ack()
4566 qp->port_num)); in hfi1_rc_rcv_tid_rdma_ack()
4568 atomic_dec(&qpriv->n_tid_requests); in hfi1_rc_rcv_tid_rdma_ack()
4569 if (qp->s_acked == qp->s_tail) in hfi1_rc_rcv_tid_rdma_ack()
4571 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE) in hfi1_rc_rcv_tid_rdma_ack()
4575 flow = &req->flows[req->acked_tail]; in hfi1_rc_rcv_tid_rdma_ack()
4576 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); in hfi1_rc_rcv_tid_rdma_ack()
4579 trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn, in hfi1_rc_rcv_tid_rdma_ack()
4580 wqe->lpsn, req); in hfi1_rc_rcv_tid_rdma_ack()
4583 if (qpriv->s_flags & RVT_S_WAIT_ACK) in hfi1_rc_rcv_tid_rdma_ack()
4584 qpriv->s_flags &= ~RVT_S_WAIT_ACK; in hfi1_rc_rcv_tid_rdma_ack()
4587 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE && in hfi1_rc_rcv_tid_rdma_ack()
4588 req->ack_seg < req->cur_seg) in hfi1_rc_rcv_tid_rdma_ack()
4600 qp->s_flags &= ~HFI1_S_WAIT_HALT; in hfi1_rc_rcv_tid_rdma_ack()
4607 qpriv->s_flags &= ~RVT_S_SEND_ONE; in hfi1_rc_rcv_tid_rdma_ack()
4610 if ((qp->s_acked == qpriv->s_tid_tail && in hfi1_rc_rcv_tid_rdma_ack()
4611 req->ack_seg == req->total_segs) || in hfi1_rc_rcv_tid_rdma_ack()
4612 qp->s_acked == qp->s_tail) { in hfi1_rc_rcv_tid_rdma_ack()
4613 qpriv->s_state = TID_OP(WRITE_DATA_LAST); in hfi1_rc_rcv_tid_rdma_ack()
4617 if (req->ack_seg == req->comp_seg) { in hfi1_rc_rcv_tid_rdma_ack()
4618 qpriv->s_state = TID_OP(WRITE_DATA); in hfi1_rc_rcv_tid_rdma_ack()
4634 if (delta_psn(ack_psn, wqe->lpsn)) in hfi1_rc_rcv_tid_rdma_ack()
4635 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in hfi1_rc_rcv_tid_rdma_ack()
4637 flow = &req->flows[req->acked_tail]; in hfi1_rc_rcv_tid_rdma_ack()
4639 * RESYNC re-numbers the PSN ranges of all remaining in hfi1_rc_rcv_tid_rdma_ack()
4642 * default number of packets. flow->resync_npkts is used in hfi1_rc_rcv_tid_rdma_ack()
4647 fpsn = full_flow_psn(flow, flow->flow_state.spsn); in hfi1_rc_rcv_tid_rdma_ack()
4648 req->r_ack_psn = psn; in hfi1_rc_rcv_tid_rdma_ack()
4655 if (flow->flow_state.generation != in hfi1_rc_rcv_tid_rdma_ack()
4657 resync_psn = mask_psn(fpsn - 1); in hfi1_rc_rcv_tid_rdma_ack()
4658 flow->resync_npkts += in hfi1_rc_rcv_tid_rdma_ack()
4664 last_acked = qp->s_acked; in hfi1_rc_rcv_tid_rdma_ack()
4668 for (fidx = rptr->acked_tail; in hfi1_rc_rcv_tid_rdma_ack()
4669 CIRC_CNT(rptr->setup_head, fidx, in hfi1_rc_rcv_tid_rdma_ack()
4675 flow = &rptr->flows[fidx]; in hfi1_rc_rcv_tid_rdma_ack()
4676 gen = flow->flow_state.generation; in hfi1_rc_rcv_tid_rdma_ack()
4678 flow->flow_state.spsn != in hfi1_rc_rcv_tid_rdma_ack()
4681 lpsn = flow->flow_state.lpsn; in hfi1_rc_rcv_tid_rdma_ack()
4683 flow->npkts = in hfi1_rc_rcv_tid_rdma_ack()
4687 flow->flow_state.generation = in hfi1_rc_rcv_tid_rdma_ack()
4689 flow->flow_state.spsn = spsn; in hfi1_rc_rcv_tid_rdma_ack()
4690 flow->flow_state.lpsn = in hfi1_rc_rcv_tid_rdma_ack()
4691 flow->flow_state.spsn + in hfi1_rc_rcv_tid_rdma_ack()
4692 flow->npkts - 1; in hfi1_rc_rcv_tid_rdma_ack()
4693 flow->pkt = 0; in hfi1_rc_rcv_tid_rdma_ack()
4694 spsn += flow->npkts; in hfi1_rc_rcv_tid_rdma_ack()
4695 resync_psn += flow->npkts; in hfi1_rc_rcv_tid_rdma_ack()
4700 if (++last_acked == qpriv->s_tid_cur + 1) in hfi1_rc_rcv_tid_rdma_ack()
4702 if (last_acked == qp->s_size) in hfi1_rc_rcv_tid_rdma_ack()
4707 req->cur_seg = req->ack_seg; in hfi1_rc_rcv_tid_rdma_ack()
4708 qpriv->s_tid_tail = qp->s_acked; in hfi1_rc_rcv_tid_rdma_ack()
4709 qpriv->s_state = TID_OP(WRITE_REQ); in hfi1_rc_rcv_tid_rdma_ack()
4713 qpriv->s_retry = qp->s_retry_cnt; in hfi1_rc_rcv_tid_rdma_ack()
4721 if (!req->flows) in hfi1_rc_rcv_tid_rdma_ack()
4723 flow = &req->flows[req->acked_tail]; in hfi1_rc_rcv_tid_rdma_ack()
4724 flpsn = full_flow_psn(flow, flow->flow_state.lpsn); in hfi1_rc_rcv_tid_rdma_ack()
4727 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, in hfi1_rc_rcv_tid_rdma_ack()
4729 req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2])); in hfi1_rc_rcv_tid_rdma_ack()
4730 req->cur_seg = req->ack_seg; in hfi1_rc_rcv_tid_rdma_ack()
4731 qpriv->s_tid_tail = qp->s_acked; in hfi1_rc_rcv_tid_rdma_ack()
4732 qpriv->s_state = TID_OP(WRITE_REQ); in hfi1_rc_rcv_tid_rdma_ack()
4733 qpriv->s_retry = qp->s_retry_cnt; in hfi1_rc_rcv_tid_rdma_ack()
4747 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_ack()
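/*
 * Summary inferred from the fragments above and below: the preceding
 * lines belong to the requester-side handler for incoming TID RDMA
 * ACKs, which advances req->ack_seg/req->acked_tail as flows are
 * acknowledged and, on a rewind, resets cur_seg/s_tid_tail and drops
 * back to TID_OP(WRITE_REQ) with a fresh s_retry budget.  The helpers
 * that follow manage the per-QP TID retry timer: the
 * HFI1_S_TID_RETRY_TIMER bit in priv->s_flags records whether
 * priv->s_tid_retry_timer is armed, the expiry is
 * tid_retry_timeout_jiffies plus the rdmavt busy_jiffies adjustment,
 * and everything except the final del_timer_sync() teardown runs under
 * qp->s_lock.
 */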
4752 struct hfi1_qp_priv *priv = qp->priv; in hfi1_add_tid_retry_timer()
4753 struct ib_qp *ibqp = &qp->ibqp; in hfi1_add_tid_retry_timer()
4754 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); in hfi1_add_tid_retry_timer()
4756 lockdep_assert_held(&qp->s_lock); in hfi1_add_tid_retry_timer()
4757 if (!(priv->s_flags & HFI1_S_TID_RETRY_TIMER)) { in hfi1_add_tid_retry_timer()
4758 priv->s_flags |= HFI1_S_TID_RETRY_TIMER; in hfi1_add_tid_retry_timer()
4759 priv->s_tid_retry_timer.expires = jiffies + in hfi1_add_tid_retry_timer()
4760 priv->tid_retry_timeout_jiffies + rdi->busy_jiffies; in hfi1_add_tid_retry_timer()
4761 add_timer(&priv->s_tid_retry_timer); in hfi1_add_tid_retry_timer()
4767 struct hfi1_qp_priv *priv = qp->priv; in hfi1_mod_tid_retry_timer()
4768 struct ib_qp *ibqp = &qp->ibqp; in hfi1_mod_tid_retry_timer()
4769 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); in hfi1_mod_tid_retry_timer()
4771 lockdep_assert_held(&qp->s_lock); in hfi1_mod_tid_retry_timer()
4772 priv->s_flags |= HFI1_S_TID_RETRY_TIMER; in hfi1_mod_tid_retry_timer()
4773 mod_timer(&priv->s_tid_retry_timer, jiffies + in hfi1_mod_tid_retry_timer()
4774 priv->tid_retry_timeout_jiffies + rdi->busy_jiffies); in hfi1_mod_tid_retry_timer()
4779 struct hfi1_qp_priv *priv = qp->priv; in hfi1_stop_tid_retry_timer()
4782 lockdep_assert_held(&qp->s_lock); in hfi1_stop_tid_retry_timer()
4783 if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) { in hfi1_stop_tid_retry_timer()
4784 rval = del_timer(&priv->s_tid_retry_timer); in hfi1_stop_tid_retry_timer()
4785 priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER; in hfi1_stop_tid_retry_timer()
4792 struct hfi1_qp_priv *priv = qp->priv; in hfi1_del_tid_retry_timer()
4794 del_timer_sync(&priv->s_tid_retry_timer); in hfi1_del_tid_retry_timer()
4795 priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER; in hfi1_del_tid_retry_timer()
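/*
 * hfi1_tid_retry_timeout() is the expiry callback for that timer
 * (outline from the visible lines): it takes qp->r_lock and qp->s_lock
 * and only acts while HFI1_S_TID_RETRY_TIMER is still set.  When
 * retries remain it clears RVT_S_WAIT_ACK, sets RVT_S_SEND_ONE and
 * HFI1_S_WAIT_HALT, switches priv->s_state to TID_OP(RESYNC) and
 * decrements priv->s_retry so the send engine emits a RESYNC; the
 * !priv->s_retry branch (largely elided here) handles the
 * out-of-retries case.
 */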
4801 struct rvt_qp *qp = priv->owner; in hfi1_tid_retry_timeout()
4806 spin_lock_irqsave(&qp->r_lock, flags); in hfi1_tid_retry_timeout()
4807 spin_lock(&qp->s_lock); in hfi1_tid_retry_timeout()
4809 if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) { in hfi1_tid_retry_timeout()
4811 if (!priv->s_retry) { in hfi1_tid_retry_timeout()
4815 (u64)priv->tid_retry_timeout_jiffies); in hfi1_tid_retry_timeout()
4817 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in hfi1_tid_retry_timeout()
4821 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in hfi1_tid_retry_timeout()
4824 qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); in hfi1_tid_retry_timeout()
4826 priv->s_flags &= ~RVT_S_WAIT_ACK; in hfi1_tid_retry_timeout()
4828 priv->s_flags |= RVT_S_SEND_ONE; in hfi1_tid_retry_timeout()
4833 qp->s_flags |= HFI1_S_WAIT_HALT; in hfi1_tid_retry_timeout()
4834 priv->s_state = TID_OP(RESYNC); in hfi1_tid_retry_timeout()
4835 priv->s_retry--; in hfi1_tid_retry_timeout()
4839 spin_unlock(&qp->s_lock); in hfi1_tid_retry_timeout()
4840 spin_unlock_irqrestore(&qp->r_lock, flags); in hfi1_tid_retry_timeout()
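/*
 * hfi1_build_tid_rdma_resync() fills in the RESYNC header: the KDETH
 * JKEY and BTH1 QP come from the negotiated remote TID RDMA parameters,
 * verbs_qp carries qp->remote_qpn, and the BTH PSN is
 * mask_psn((next_generation << HFI1_KDETH_BTH_SEQ_SHIFT) - 1), i.e.
 * (assuming the usual generation/sequence split of the KDETH PSN) the
 * last PSN before the new generation, with the sequence bits all ones.
 * That value is remembered in qpriv->s_resync_psn and is presumably
 * what hfi1_tid_rdma_is_resync_psn() recognizes on the ACK path.
 */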
4847 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_build_tid_rdma_resync()
4850 struct tid_rdma_flow *flow = &req->flows[fidx]; in hfi1_build_tid_rdma_resync()
4854 remote = rcu_dereference(qpriv->tid_rdma.remote); in hfi1_build_tid_rdma_resync()
4855 KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth1, JKEY, remote->jkey); in hfi1_build_tid_rdma_resync()
4856 ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn); in hfi1_build_tid_rdma_resync()
4857 *bth1 = remote->qp; in hfi1_build_tid_rdma_resync()
4860 generation = kern_flow_generation_next(flow->flow_state.generation); in hfi1_build_tid_rdma_resync()
4861 *bth2 = mask_psn((generation << HFI1_KDETH_BTH_SEQ_SHIFT) - 1); in hfi1_build_tid_rdma_resync()
4862 qpriv->s_resync_psn = *bth2; in hfi1_build_tid_rdma_resync()
4864 KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth0, KVER, 0x1); in hfi1_build_tid_rdma_resync()
4866 return sizeof(ohdr->u.tid_rdma.resync) / sizeof(u32); in hfi1_build_tid_rdma_resync()
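/*
 * hfi1_rc_rcv_tid_rdma_resync() is the responder-side RESYNC handler
 * (outline from the fragments below): it derives the next flow
 * generation and, under rcd->exp_lock, either just records it (when no
 * hardware flow is held, fs->index >= RXE_NUM_TID_FLOWS) or reprograms
 * the hardware flow via kern_setup_hw_flow(); it resets fs->psn,
 * clears HFI1_R_TID_SW_PSN, then walks the s_ack_queue from r_tid_tail
 * re-numbering generation/spsn/lpsn/r_next_psn for every flow of the
 * pending TID WRITE requests.  Finally it marks qpriv->resync, clears
 * qpriv->s_nak_state and sets RVT_S_ECN (presumably only when the
 * packet carried an FECN).
 */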
4871 struct ib_other_headers *ohdr = packet->ohdr; in hfi1_rc_rcv_tid_rdma_resync()
4872 struct rvt_qp *qp = packet->qp; in hfi1_rc_rcv_tid_rdma_resync()
4873 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_rc_rcv_tid_rdma_resync()
4874 struct hfi1_ctxtdata *rcd = qpriv->rcd; in hfi1_rc_rcv_tid_rdma_resync()
4875 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); in hfi1_rc_rcv_tid_rdma_resync()
4879 struct tid_flow_state *fs = &qpriv->flow_state; in hfi1_rc_rcv_tid_rdma_resync()
4885 psn = mask_psn(be32_to_cpu(ohdr->bth[2])); in hfi1_rc_rcv_tid_rdma_resync()
4888 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_resync()
4890 gen_next = (fs->generation == KERN_GENERATION_RESERVED) ? in hfi1_rc_rcv_tid_rdma_resync()
4891 generation : kern_flow_generation_next(fs->generation); in hfi1_rc_rcv_tid_rdma_resync()
4896 if (generation != mask_generation(gen_next - 1) && in hfi1_rc_rcv_tid_rdma_resync()
4900 if (qpriv->resync) in hfi1_rc_rcv_tid_rdma_resync()
4903 spin_lock(&rcd->exp_lock); in hfi1_rc_rcv_tid_rdma_resync()
4904 if (fs->index >= RXE_NUM_TID_FLOWS) { in hfi1_rc_rcv_tid_rdma_resync()
4909 fs->generation = generation; in hfi1_rc_rcv_tid_rdma_resync()
4912 rcd->flows[fs->index].generation = generation; in hfi1_rc_rcv_tid_rdma_resync()
4913 fs->generation = kern_setup_hw_flow(rcd, fs->index); in hfi1_rc_rcv_tid_rdma_resync()
4915 fs->psn = 0; in hfi1_rc_rcv_tid_rdma_resync()
4920 qpriv->s_flags &= ~HFI1_R_TID_SW_PSN; in hfi1_rc_rcv_tid_rdma_resync()
4928 for (idx = qpriv->r_tid_tail; ; idx++) { in hfi1_rc_rcv_tid_rdma_resync()
4931 if (idx > rvt_size_atomic(&dev->rdi)) in hfi1_rc_rcv_tid_rdma_resync()
4933 e = &qp->s_ack_queue[idx]; in hfi1_rc_rcv_tid_rdma_resync()
4934 if (e->opcode == TID_OP(WRITE_REQ)) { in hfi1_rc_rcv_tid_rdma_resync()
4936 trace_hfi1_tid_req_rcv_resync(qp, 0, e->opcode, e->psn, in hfi1_rc_rcv_tid_rdma_resync()
4937 e->lpsn, req); in hfi1_rc_rcv_tid_rdma_resync()
4940 for (flow_idx = req->clear_tail; in hfi1_rc_rcv_tid_rdma_resync()
4941 CIRC_CNT(req->setup_head, flow_idx, in hfi1_rc_rcv_tid_rdma_resync()
4947 flow = &req->flows[flow_idx]; in hfi1_rc_rcv_tid_rdma_resync()
4949 flow->flow_state.lpsn); in hfi1_rc_rcv_tid_rdma_resync()
4950 next = flow->flow_state.r_next_psn; in hfi1_rc_rcv_tid_rdma_resync()
4951 flow->npkts = delta_psn(lpsn, next - 1); in hfi1_rc_rcv_tid_rdma_resync()
4952 flow->flow_state.generation = fs->generation; in hfi1_rc_rcv_tid_rdma_resync()
4953 flow->flow_state.spsn = fs->psn; in hfi1_rc_rcv_tid_rdma_resync()
4954 flow->flow_state.lpsn = in hfi1_rc_rcv_tid_rdma_resync()
4955 flow->flow_state.spsn + flow->npkts - 1; in hfi1_rc_rcv_tid_rdma_resync()
4956 flow->flow_state.r_next_psn = in hfi1_rc_rcv_tid_rdma_resync()
4958 flow->flow_state.spsn); in hfi1_rc_rcv_tid_rdma_resync()
4959 fs->psn += flow->npkts; in hfi1_rc_rcv_tid_rdma_resync()
4964 if (idx == qp->s_tail_ack_queue) in hfi1_rc_rcv_tid_rdma_resync()
4968 spin_unlock(&rcd->exp_lock); in hfi1_rc_rcv_tid_rdma_resync()
4969 qpriv->resync = true; in hfi1_rc_rcv_tid_rdma_resync()
4971 qpriv->s_nak_state = 0; in hfi1_rc_rcv_tid_rdma_resync()
4975 qp->s_flags |= RVT_S_ECN; in hfi1_rc_rcv_tid_rdma_resync()
4976 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_resync()
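/*
 * update_tid_tail() advances priv->s_tid_tail past completed entries,
 * wrapping at qp->s_size and stopping at priv->s_tid_cur, until it
 * lands on the next IB_WR_TID_RDMA_WRITE wqe, and sets priv->s_state
 * back to TID_OP(WRITE_RESP) for that request.
 */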
4984 __must_hold(&qp->s_lock) in update_tid_tail()
4986 struct hfi1_qp_priv *priv = qp->priv; in update_tid_tail()
4990 lockdep_assert_held(&qp->s_lock); in update_tid_tail()
4992 if (priv->s_tid_tail == priv->s_tid_cur) in update_tid_tail()
4994 for (i = priv->s_tid_tail + 1; ; i++) { in update_tid_tail()
4995 if (i == qp->s_size) in update_tid_tail()
4998 if (i == priv->s_tid_cur) in update_tid_tail()
5001 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) in update_tid_tail()
5004 priv->s_tid_tail = i; in update_tid_tail()
5005 priv->s_state = TID_OP(WRITE_RESP); in update_tid_tail()
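/*
 * hfi1_make_tid_rdma_pkt() is the TID RDMA leg of the send engine
 * (outline from the fragments below).  While new TID WRITE requests can
 * still be posted, or the current request still has allocated but
 * unbuilt segments, it borrows hfi1_make_rc_req() through the IB iowait
 * work and marks HFI1_S_TID_BUSY_SET.  Otherwise it allocates a txreq
 * and appears to build either a TID ACK (when RVT_S_ACK_PENDING is
 * set), WRITE DATA / WRITE DATA LAST packets from priv->s_tid_tail
 * (loading priv->tid_ss from the wqe on the first segment and advancing
 * clear_tail/cur_seg, setting HFI1_S_WAIT_TID_RESP when responses run
 * out), or a RESYNC.  RVT_S_SEND_ONE is converted into RVT_S_WAIT_ACK,
 * the finished txreq gets its hdr_dwords, sde, sge state and length,
 * and the bail path frees the txreq and drops RVT_S_BUSY.
 */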
5009 __must_hold(&qp->s_lock) in hfi1_make_tid_rdma_pkt()
5011 struct hfi1_qp_priv *priv = qp->priv; in hfi1_make_tid_rdma_pkt()
5015 struct rvt_sge_state *ss = &qp->s_sge; in hfi1_make_tid_rdma_pkt()
5016 struct rvt_ack_entry *e = &qp->s_ack_queue[qp->s_tail_ack_queue]; in hfi1_make_tid_rdma_pkt()
5021 lockdep_assert_held(&qp->s_lock); in hfi1_make_tid_rdma_pkt()
5027 if (((atomic_read(&priv->n_tid_requests) < HFI1_TID_RDMA_WRITE_CNT) && in hfi1_make_tid_rdma_pkt()
5028 atomic_read(&priv->n_requests) && in hfi1_make_tid_rdma_pkt()
5029 !(qp->s_flags & (RVT_S_BUSY | RVT_S_WAIT_ACK | in hfi1_make_tid_rdma_pkt()
5031 (e->opcode == TID_OP(WRITE_REQ) && req->cur_seg < req->alloc_seg && in hfi1_make_tid_rdma_pkt()
5032 !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)))) { in hfi1_make_tid_rdma_pkt()
5035 iowork = iowait_get_ib_work(&priv->s_iowait); in hfi1_make_tid_rdma_pkt()
5036 ps->s_txreq = get_waiting_verbs_txreq(iowork); in hfi1_make_tid_rdma_pkt()
5037 if (ps->s_txreq || hfi1_make_rc_req(qp, ps)) { in hfi1_make_tid_rdma_pkt()
5038 priv->s_flags |= HFI1_S_TID_BUSY_SET; in hfi1_make_tid_rdma_pkt()
5043 ps->s_txreq = get_txreq(ps->dev, qp); in hfi1_make_tid_rdma_pkt()
5044 if (!ps->s_txreq) in hfi1_make_tid_rdma_pkt()
5047 ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth; in hfi1_make_tid_rdma_pkt()
5049 if ((priv->s_flags & RVT_S_ACK_PENDING) && in hfi1_make_tid_rdma_pkt()
5059 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) in hfi1_make_tid_rdma_pkt()
5062 if (priv->s_flags & RVT_S_WAIT_ACK) in hfi1_make_tid_rdma_pkt()
5066 if (priv->s_tid_tail == HFI1_QP_WQE_INVALID) in hfi1_make_tid_rdma_pkt()
5068 wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail); in hfi1_make_tid_rdma_pkt()
5070 trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode, wqe->psn, in hfi1_make_tid_rdma_pkt()
5071 wqe->lpsn, req); in hfi1_make_tid_rdma_pkt()
5072 switch (priv->s_state) { in hfi1_make_tid_rdma_pkt()
5075 priv->tid_ss.sge = wqe->sg_list[0]; in hfi1_make_tid_rdma_pkt()
5076 priv->tid_ss.sg_list = wqe->sg_list + 1; in hfi1_make_tid_rdma_pkt()
5077 priv->tid_ss.num_sge = wqe->wr.num_sge; in hfi1_make_tid_rdma_pkt()
5078 priv->tid_ss.total_len = wqe->length; in hfi1_make_tid_rdma_pkt()
5080 if (priv->s_state == TID_OP(WRITE_REQ)) in hfi1_make_tid_rdma_pkt()
5082 priv->s_state = TID_OP(WRITE_DATA); in hfi1_make_tid_rdma_pkt()
5101 wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail); in hfi1_make_tid_rdma_pkt()
5103 len = wqe->length; in hfi1_make_tid_rdma_pkt()
5105 if (!req->comp_seg || req->cur_seg == req->comp_seg) in hfi1_make_tid_rdma_pkt()
5108 trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode, in hfi1_make_tid_rdma_pkt()
5109 wqe->psn, wqe->lpsn, req); in hfi1_make_tid_rdma_pkt()
5115 req->clear_tail = CIRC_NEXT(req->clear_tail, in hfi1_make_tid_rdma_pkt()
5117 if (++req->cur_seg < req->total_segs) { in hfi1_make_tid_rdma_pkt()
5118 if (!CIRC_CNT(req->setup_head, req->clear_tail, in hfi1_make_tid_rdma_pkt()
5120 qp->s_flags |= HFI1_S_WAIT_TID_RESP; in hfi1_make_tid_rdma_pkt()
5122 priv->s_state = TID_OP(WRITE_DATA_LAST); in hfi1_make_tid_rdma_pkt()
5129 hwords += sizeof(ohdr->u.tid_rdma.w_data) / sizeof(u32); in hfi1_make_tid_rdma_pkt()
5130 ss = &priv->tid_ss; in hfi1_make_tid_rdma_pkt()
5136 wqe = rvt_get_swqe_ptr(qp, priv->s_tid_cur); in hfi1_make_tid_rdma_pkt()
5139 if (!req->comp_seg) { in hfi1_make_tid_rdma_pkt()
5141 (!priv->s_tid_cur ? qp->s_size : in hfi1_make_tid_rdma_pkt()
5142 priv->s_tid_cur) - 1); in hfi1_make_tid_rdma_pkt()
5147 CIRC_PREV(req->setup_head, in hfi1_make_tid_rdma_pkt()
5157 if (priv->s_flags & RVT_S_SEND_ONE) { in hfi1_make_tid_rdma_pkt()
5158 priv->s_flags &= ~RVT_S_SEND_ONE; in hfi1_make_tid_rdma_pkt()
5159 priv->s_flags |= RVT_S_WAIT_ACK; in hfi1_make_tid_rdma_pkt()
5162 qp->s_len -= len; in hfi1_make_tid_rdma_pkt()
5163 ps->s_txreq->hdr_dwords = hwords; in hfi1_make_tid_rdma_pkt()
5164 ps->s_txreq->sde = priv->s_sde; in hfi1_make_tid_rdma_pkt()
5165 ps->s_txreq->ss = ss; in hfi1_make_tid_rdma_pkt()
5166 ps->s_txreq->s_cur_size = len; in hfi1_make_tid_rdma_pkt()
5171 hfi1_put_txreq(ps->s_txreq); in hfi1_make_tid_rdma_pkt()
5173 ps->s_txreq = NULL; in hfi1_make_tid_rdma_pkt()
5174 priv->s_flags &= ~RVT_S_BUSY; in hfi1_make_tid_rdma_pkt()
5182 iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID); in hfi1_make_tid_rdma_pkt()
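/*
 * make_tid_rdma_ack() builds the responder's TID RDMA ACK (outline from
 * the fragments below).  Starting from the s_ack_queue entry at
 * qpriv->r_tid_ack (stepping back one request when a RESYNC must be
 * acknowledged against an already-completed request), it folds newly
 * completed segments into req->ack_seg, walks forward across fully
 * acknowledged TID WRITE requests, and then chooses between an ACK and
 * a NAK based on qpriv->s_nak_state and the resync-PSN comparison.  It
 * clears RVT_S_ACK_PENDING and fills in the txreq; the bail path
 * appears to clear the flag and report that no ACK was produced.
 */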
5191 struct hfi1_qp_priv *qpriv = qp->priv; in make_tid_rdma_ack()
5192 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); in make_tid_rdma_ack()
5202 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) in make_tid_rdma_ack()
5205 /* header size in 32-bit words LRH+BTH = (8+12)/4. */ in make_tid_rdma_ack()
5208 e = &qp->s_ack_queue[qpriv->r_tid_ack]; in make_tid_rdma_ack()
5214 * the previous request) and let the do-while loop execute again. in make_tid_rdma_ack()
5215 * The advantage of executing the do-while loop is that any data in make_tid_rdma_ack()
5217 * RESYNC ack. It turns out that for the do-while loop we only need in make_tid_rdma_ack()
5218 * to pull back qpriv->r_tid_ack, not the segment in make_tid_rdma_ack()
5222 if (qpriv->resync) { in make_tid_rdma_ack()
5223 if (!req->ack_seg || req->ack_seg == req->total_segs) in make_tid_rdma_ack()
5224 qpriv->r_tid_ack = !qpriv->r_tid_ack ? in make_tid_rdma_ack()
5225 rvt_size_atomic(&dev->rdi) : in make_tid_rdma_ack()
5226 qpriv->r_tid_ack - 1; in make_tid_rdma_ack()
5227 e = &qp->s_ack_queue[qpriv->r_tid_ack]; in make_tid_rdma_ack()
5231 trace_hfi1_rsp_make_tid_ack(qp, e->psn); in make_tid_rdma_ack()
5232 trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn, in make_tid_rdma_ack()
5238 if (!qpriv->s_nak_state && !qpriv->resync && in make_tid_rdma_ack()
5239 req->ack_seg == req->comp_seg) in make_tid_rdma_ack()
5249 req->ack_seg += in make_tid_rdma_ack()
5250 /* Get up-to-date value */ in make_tid_rdma_ack()
5251 CIRC_CNT(req->clear_tail, req->acked_tail, in make_tid_rdma_ack()
5254 req->acked_tail = req->clear_tail; in make_tid_rdma_ack()
5257 * req->clear_tail points to the segment currently being in make_tid_rdma_ack()
5261 flow = CIRC_PREV(req->acked_tail, MAX_FLOWS); in make_tid_rdma_ack()
5262 if (req->ack_seg != req->total_segs) in make_tid_rdma_ack()
5264 req->state = TID_REQUEST_COMPLETE; in make_tid_rdma_ack()
5266 next = qpriv->r_tid_ack + 1; in make_tid_rdma_ack()
5267 if (next > rvt_size_atomic(&dev->rdi)) in make_tid_rdma_ack()
5269 qpriv->r_tid_ack = next; in make_tid_rdma_ack()
5270 if (qp->s_ack_queue[next].opcode != TID_OP(WRITE_REQ)) in make_tid_rdma_ack()
5272 nreq = ack_to_tid_req(&qp->s_ack_queue[next]); in make_tid_rdma_ack()
5273 if (!nreq->comp_seg || nreq->ack_seg == nreq->comp_seg) in make_tid_rdma_ack()
5277 e = &qp->s_ack_queue[qpriv->r_tid_ack]; in make_tid_rdma_ack()
5282 * At this point qpriv->r_tid_ack == qpriv->r_tid_tail but e and in make_tid_rdma_ack()
5285 if (qpriv->s_nak_state || in make_tid_rdma_ack()
5286 (qpriv->resync && in make_tid_rdma_ack()
5287 !hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1) && in make_tid_rdma_ack()
5288 (cmp_psn(qpriv->r_next_psn_kdeth - 1, in make_tid_rdma_ack()
5289 full_flow_psn(&req->flows[flow], in make_tid_rdma_ack()
5290 req->flows[flow].flow_state.lpsn)) > 0))) { in make_tid_rdma_ack()
5293 * requests. Therefore, we NAK with the req->acked_tail in make_tid_rdma_ack()
5294 * segment for the request at qpriv->r_tid_ack (same at in make_tid_rdma_ack()
5295 * this point as the req->clear_tail segment for the in make_tid_rdma_ack()
5296 * qpriv->r_tid_tail request) in make_tid_rdma_ack()
5298 e = &qp->s_ack_queue[qpriv->r_tid_ack]; in make_tid_rdma_ack()
5300 flow = req->acked_tail; in make_tid_rdma_ack()
5301 } else if (req->ack_seg == req->total_segs && in make_tid_rdma_ack()
5302 qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK) in make_tid_rdma_ack()
5303 qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK; in make_tid_rdma_ack()
5306 trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn, in make_tid_rdma_ack()
5311 qpriv->s_flags &= ~RVT_S_ACK_PENDING; in make_tid_rdma_ack()
5312 ps->s_txreq->hdr_dwords = hwords; in make_tid_rdma_ack()
5313 ps->s_txreq->sde = qpriv->s_sde; in make_tid_rdma_ack()
5314 ps->s_txreq->s_cur_size = len; in make_tid_rdma_ack()
5315 ps->s_txreq->ss = NULL; in make_tid_rdma_ack()
5318 ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP; in make_tid_rdma_ack()
5326 qpriv->s_flags &= ~RVT_S_ACK_PENDING; in make_tid_rdma_ack()
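/*
 * hfi1_send_tid_ok() gates the TID send engine: it refuses to run while
 * RVT_S_BUSY or any HFI1_S_ANY_WAIT_IO condition is set, and otherwise
 * allows progress when a txreq is already queued on the TID iowait
 * work, a response is pending (RVT_S_RESP_PENDING), or the QP is not
 * waiting on HFI1_S_ANY_TID_WAIT_SEND.
 */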
5332 struct hfi1_qp_priv *priv = qp->priv; in hfi1_send_tid_ok()
5334 return !(priv->s_flags & RVT_S_BUSY || in hfi1_send_tid_ok()
5335 qp->s_flags & HFI1_S_ANY_WAIT_IO) && in hfi1_send_tid_ok()
5336 (verbs_txreq_queued(iowait_get_tid_work(&priv->s_iowait)) || in hfi1_send_tid_ok()
5337 (priv->s_flags & RVT_S_RESP_PENDING) || in hfi1_send_tid_ok()
5338 !(qp->s_flags & HFI1_S_ANY_TID_WAIT_SEND)); in hfi1_send_tid_ok()
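/*
 * _hfi1_do_tid_send() appears to be the workqueue entry point: it
 * resolves the QP from the iowait work and calls hfi1_do_tid_send(),
 * the actual send loop.  hfi1_do_tid_send() sets up the packet state,
 * bails (setting IOWAIT_PENDING_TID when the QP is waiting on I/O) if
 * the send-OK check denies progress, marks RVT_S_BUSY, picks the SDMA
 * engine's CPU, and then loops building and sending packets,
 * temporarily switching to the IB iowait work whenever
 * HFI1_S_TID_BUSY_SET indicates a packet was pre-built by the RC path.
 */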
5344 struct rvt_qp *qp = iowait_to_qp(w->iow); in _hfi1_do_tid_send()
5352 struct hfi1_qp_priv *priv = qp->priv; in hfi1_do_tid_send()
5354 ps.dev = to_idev(qp->ibqp.device); in hfi1_do_tid_send()
5355 ps.ibp = to_iport(qp->ibqp.device, qp->port_num); in hfi1_do_tid_send()
5357 ps.wait = iowait_get_tid_work(&priv->s_iowait); in hfi1_do_tid_send()
5359 ps.timeout_int = qp->timeout_jiffies / 8; in hfi1_do_tid_send()
5362 spin_lock_irqsave(&qp->s_lock, ps.flags); in hfi1_do_tid_send()
5366 if (qp->s_flags & HFI1_S_ANY_WAIT_IO) in hfi1_do_tid_send()
5367 iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID); in hfi1_do_tid_send()
5368 spin_unlock_irqrestore(&qp->s_lock, ps.flags); in hfi1_do_tid_send()
5372 priv->s_flags |= RVT_S_BUSY; in hfi1_do_tid_send()
5375 ps.cpu = priv->s_sde ? priv->s_sde->cpu : in hfi1_do_tid_send()
5376 cpumask_first(cpumask_of_node(ps.ppd->dd->node)); in hfi1_do_tid_send()
5379 /* ensure a pre-built packet is handled */ in hfi1_do_tid_send()
5384 if (priv->s_flags & HFI1_S_TID_BUSY_SET) { in hfi1_do_tid_send()
5385 qp->s_flags |= RVT_S_BUSY; in hfi1_do_tid_send()
5386 ps.wait = iowait_get_ib_work(&priv->s_iowait); in hfi1_do_tid_send()
5388 spin_unlock_irqrestore(&qp->s_lock, ps.flags); in hfi1_do_tid_send()
5401 spin_lock_irqsave(&qp->s_lock, ps.flags); in hfi1_do_tid_send()
5402 if (priv->s_flags & HFI1_S_TID_BUSY_SET) { in hfi1_do_tid_send()
5403 qp->s_flags &= ~RVT_S_BUSY; in hfi1_do_tid_send()
5404 priv->s_flags &= ~HFI1_S_TID_BUSY_SET; in hfi1_do_tid_send()
5405 ps.wait = iowait_get_tid_work(&priv->s_iowait); in hfi1_do_tid_send()
5406 if (iowait_flag_set(&priv->s_iowait, in hfi1_do_tid_send()
5412 iowait_starve_clear(ps.pkts_sent, &priv->s_iowait); in hfi1_do_tid_send()
5413 spin_unlock_irqrestore(&qp->s_lock, ps.flags); in hfi1_do_tid_send()
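/*
 * _hfi1_schedule_tid_send() queues the TID iowait work on the port
 * workqueue, pinned to the SDMA engine's CPU when one is assigned
 * (falling back to the first CPU of the device's NUMA node), and bails
 * out when the device is shutting down.  hfi1_schedule_tid_send(),
 * documented below, appears to wrap it with the hfi1_send_tid_ok()
 * check and otherwise sets IOWAIT_PENDING_TID so the work is picked up
 * once the current sender finishes.
 */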
5418 struct hfi1_qp_priv *priv = qp->priv; in _hfi1_schedule_tid_send()
5420 to_iport(qp->ibqp.device, qp->port_num); in _hfi1_schedule_tid_send()
5422 struct hfi1_devdata *dd = ppd->dd; in _hfi1_schedule_tid_send()
5424 if ((dd->flags & HFI1_SHUTDOWN)) in _hfi1_schedule_tid_send()
5427 return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq, in _hfi1_schedule_tid_send()
5428 priv->s_sde ? in _hfi1_schedule_tid_send()
5429 priv->s_sde->cpu : in _hfi1_schedule_tid_send()
5430 cpumask_first(cpumask_of_node(dd->node))); in _hfi1_schedule_tid_send()
5434 * hfi1_schedule_tid_send - schedule progress on TID RDMA state machine
5449 lockdep_assert_held(&qp->s_lock); in hfi1_schedule_tid_send()
5460 if (qp->s_flags & HFI1_S_ANY_WAIT_IO) in hfi1_schedule_tid_send()
5461 iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait, in hfi1_schedule_tid_send()
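/*
 * hfi1_tid_rdma_ack_interlock() looks at the ack queue entry preceding
 * qp->s_tail_ack_queue: if the entry passed in (e) is a READ request
 * (TID or legacy RDMA READ) and the previous entry is a TID WRITE
 * request that has not been fully acknowledged, it sets
 * HFI1_R_TID_WAIT_INTERLCK so the read is held back until the write
 * completes.
 */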
5470 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); in hfi1_tid_rdma_ack_interlock()
5471 struct hfi1_qp_priv *priv = qp->priv; in hfi1_tid_rdma_ack_interlock()
5474 s_prev = qp->s_tail_ack_queue == 0 ? rvt_size_atomic(&dev->rdi) : in hfi1_tid_rdma_ack_interlock()
5475 (qp->s_tail_ack_queue - 1); in hfi1_tid_rdma_ack_interlock()
5476 prev = &qp->s_ack_queue[s_prev]; in hfi1_tid_rdma_ack_interlock()
5478 if ((e->opcode == TID_OP(READ_REQ) || in hfi1_tid_rdma_ack_interlock()
5479 e->opcode == OP(RDMA_READ_REQUEST)) && in hfi1_tid_rdma_ack_interlock()
5480 prev->opcode == TID_OP(WRITE_REQ)) { in hfi1_tid_rdma_ack_interlock()
5482 if (req->ack_seg != req->total_segs) { in hfi1_tid_rdma_ack_interlock()
5483 priv->s_flags |= HFI1_R_TID_WAIT_INTERLCK; in hfi1_tid_rdma_ack_interlock()
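/*
 * Only the FECN reaction of tid_rdma_rcv_err() is visible below:
 * RVT_S_ECN is set under qp->s_lock, presumably so a BECN goes out on a
 * subsequent packet; the actual recovery for the erroneous TID RDMA
 * request is elided here.
 */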
5510 spin_lock_irqsave(&qp->s_lock, flags); in tid_rdma_rcv_err()
5511 qp->s_flags |= RVT_S_ECN; in tid_rdma_rcv_err()
5512 spin_unlock_irqrestore(&qp->s_lock, flags); in tid_rdma_rcv_err()
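/*
 * update_r_next_psn_fecn(): when an FECN-marked packet arrives through
 * the eager receive path and the flow is not being tracked in software
 * (HFI1_R_TID_SW_PSN clear), the expected next PSN is re-read from the
 * hardware flow state via read_r_next_psn().
 */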
5526 if (fecn && packet->etype == RHF_RCV_TYPE_EAGER && in update_r_next_psn_fecn()
5527 !(priv->s_flags & HFI1_R_TID_SW_PSN)) { in update_r_next_psn_fecn()
5528 struct hfi1_devdata *dd = rcd->dd; in update_r_next_psn_fecn()
5530 flow->flow_state.r_next_psn = in update_r_next_psn_fecn()
5531 read_r_next_psn(dd, rcd->ctxt, flow->idx); in update_r_next_psn_fecn()