Lines matching +full:total +full:- +full:timeout (full-word search) in include/rdma/rdmavt_qp.h

1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
3 * Copyright(c) 2016 - 2020 Intel Corporation.
13 #include <rdma/rvt-abi.h>
55 #define RVT_AIP_QP_MAX (u32)(RVT_AIP_QP_BASE + RVT_AIP_QPN_MAX - 1)
60 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
61 * RVT_S_BUSY - send tasklet is processing the QP
62 * RVT_S_TIMER - the RC retry timer is active
63 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
64 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
66 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
68 * RVT_S_WAIT_RNR - waiting for RNR timeout
69 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
70 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
72 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
73 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
74 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
75 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
76 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
77 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
78 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
79 * RVT_S_ECN - a BECN was queued to the send engine
80 * RVT_S_MAX_BIT_MASK - The max bit that can be used by rdmavt
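The RVT_S_* values above are bits in qp->s_flags, which is only manipulated while holding the QP's s_lock (the helpers later in this header assert that with lockdep_assert_held(&qp->s_lock)). A minimal illustrative sketch of that pattern, using a hypothetical driver helper name, could look like:

/* Hypothetical driver helper - a sketch of the s_flags locking pattern,
 * not part of this header.
 */
static void drv_wait_for_ack(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);	/* s_flags is protected by s_lock */
	if (!(qp->s_flags & RVT_S_WAIT_ACK))
		qp->s_flags |= RVT_S_WAIT_ACK;	/* stop sending until an ACK arrives */
}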
147 * rvt_ud_wr - IB UD work plus AH cache
165 * in qp->s_max_sge.
178 u32 length; /* total length of data in sg_list */
184 * struct rvt_krwq - kernel struct receive work request
189 * @count: approximate number of total receive entries posted
209 * rvt_get_swqe_ah - Return the pointer to the struct rvt_ah
215 return ibah_to_rvtah(swqe->ud_wr.wr.ah);
219 * rvt_get_swqe_ah_attr - Return the cached ah attribute information
225 return swqe->ud_wr.attr;
229 * rvt_get_swqe_remote_qpn - Access the remote QPN value
235 return swqe->ud_wr.wr.remote_qpn;
239 * rvt_get_swqe_remote_qkey - Access the remote qkey value
245 return swqe->ud_wr.wr.remote_qkey;
249 * rvt_get_swqe_pkey_index - Access the pkey index
255 return swqe->ud_wr.wr.pkey_index;
268 * rvt_get_rq_count - count the number of receive work queue entries
274 * Return - total number of entries in the Receive Queue
279 u32 count = head - tail;
282 count += rq->size;
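The head/tail arithmetic above is the standard circular-buffer occupancy calculation: the difference goes negative when head has wrapped past tail, and adding rq->size corrects for the wrap. A standalone model of the same arithmetic, in plain C and independent of the kernel types:

#include <assert.h>
#include <stdint.h>

/* Standalone model of the occupancy math in rvt_get_rq_count(). */
static uint32_t ring_count(uint32_t size, uint32_t head, uint32_t tail)
{
	int count = (int)head - (int)tail;

	if (count < 0)
		count += size;		/* head wrapped around the ring */
	return (uint32_t)count;
}

int main(void)
{
	assert(ring_count(64, 10, 4) == 6);	/* no wrap: 10 - 4 */
	assert(ring_count(64, 2, 60) == 6);	/* wrapped: 2 - 60 + 64 */
	return 0;
}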
312 * rvt_operation_params - op table entry
313 * @length - the length to copy into the swqe entry
314 * @qpt_support - a bit mask indicating QP type support
315 * @flags - RVT_OPERATION flags (see above)
343 unsigned long timeout_jiffies; /* computed from timeout */
356 u8 alt_timeout; /* Alternate path timeout for this QP */
357 u8 timeout; /* Timeout for this QP */
367 u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
368 u8 s_max_sge; /* size of s_wq->sg_list */
383 u32 r_len; /* total length of r_sge */
411 u32 s_len; /* total length of s_sge */
412 u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
421 u32 s_acked; /* last un-ACK'ed entry */
431 u8 s_nak_state; /* non-zero if NAK is pending */
432 u8 r_nak_state; /* non-zero if NAK is pending */
473 #define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE - 1)
477 * QPN-map pages start out as NULL; they get allocated upon
535 return (struct rvt_swqe *)((char *)qp->s_wq +
537 qp->s_max_sge *
548 ((char *)rq->kwq->curr_wq +
550 rq->max_sge * sizeof(struct ib_sge)) * n);
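rvt_get_swqe_ptr() and rvt_get_rwqe_ptr() both index a ring of variable-sized entries: each slot is a fixed header followed by max_sge scatter/gather elements, so the byte offset of entry n is n * (sizeof(header) + max_sge * sizeof(element)). A standalone sketch of that stride calculation, with illustrative sizes rather than the real structure layouts:

#include <stddef.h>
#include <stdio.h>

/* Standalone model of variable-stride ring indexing, as used by
 * rvt_get_swqe_ptr()/rvt_get_rwqe_ptr(). Sizes here are made up. */
static void *ring_entry(void *base, size_t hdr_size, size_t sge_size,
			unsigned int max_sge, unsigned int n)
{
	size_t stride = hdr_size + (size_t)max_sge * sge_size;

	return (char *)base + (size_t)n * stride;
}

int main(void)
{
	char ring[4096];

	/* Entry 3 of a ring whose slots hold a 64-byte header and 4 16-byte SGEs. */
	printf("entry 3 at offset %td\n",
	       (char *)ring_entry(ring, 64, 16, 4, 3) - ring);
	return 0;
}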
554 * rvt_is_user_qp - return if this is user mode QP
555 * @qp - the target QP
559 return !!qp->pid;
563 * rvt_get_qp - get a QP reference
564 * @qp - the QP to hold
568 atomic_inc(&qp->refcount);
572 * rvt_put_qp - release a QP reference
573 * @qp - the QP to release
577 if (qp && atomic_dec_and_test(&qp->refcount))
578 wake_up(&qp->wait);
582 * rvt_put_swqe - drop mr refs held by swqe
583 * @wqe - the send wqe
591 for (i = 0; i < wqe->wr.num_sge; i++) {
592 struct rvt_sge *sge = &wqe->sg_list[i];
594 rvt_put_mr(sge->mr);
599 * rvt_qp_wqe_reserve - reserve operation
600 * @qp - the rvt qp
601 * @wqe - the send wqe
610 atomic_inc(&qp->s_reserved_used);
614 * rvt_qp_wqe_unreserve - clean reserved operation
615 * @qp - the rvt qp
616 * @flags - send wqe flags
631 atomic_dec(&qp->s_reserved_used);
632 /* ensure no compiler re-order up to s_last change */
645 return (((int)a) - ((int)b)) << 8;
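rvt_cmp_msn() compares serial numbers that are only 24 bits wide: shifting the 32-bit difference left by 8 discards the unused top byte, so the sign of the result reflects which value is ahead even across wraparound of the 24-bit space. A standalone model of that intent (not a byte-for-byte copy of the kernel helper):

#include <assert.h>
#include <stdint.h>

/* Standalone model of a 24-bit serial-number compare: the left shift by 8
 * drops the unused top byte so the sign is decided by the lower 24 bits. */
static int32_t msn_cmp(uint32_t a, uint32_t b)
{
	return (int32_t)((a - b) << 8);
}

int main(void)
{
	assert(msn_cmp(5, 3) > 0);			/* a ahead of b */
	assert(msn_cmp(3, 5) < 0);			/* a behind b */
	assert(msn_cmp(0x000001, 0xffffff) > 0);	/* ahead across 24-bit wrap */
	return 0;
}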
655 * rvt_div_round_up_mtu - round up divide
656 * @qp - the qp pair
657 * @len - the length
663 return (len + qp->pmtu - 1) >> qp->log_pmtu;
667 * @qp - the qp pair
668 * @len - the length
674 return len >> qp->log_pmtu;
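Because the path MTU is a power of two, division by the MTU reduces to a shift by qp->log_pmtu; rvt_div_round_up_mtu() adds pmtu - 1 first to round up. For example, with a 4096-byte MTU (log_pmtu = 12), a 4500-byte payload needs (4500 + 4095) >> 12 = 2 packets, while the plain divide yields 1. A standalone model:

#include <assert.h>
#include <stdint.h>

/* Standalone model of rvt_div_round_up_mtu()/rvt_div_mtu() for a
 * power-of-two MTU; 4096 bytes corresponds to log_pmtu == 12. */
static uint32_t div_round_up_mtu(uint32_t len, uint32_t pmtu, uint8_t log_pmtu)
{
	return (len + pmtu - 1) >> log_pmtu;
}

int main(void)
{
	assert(div_round_up_mtu(4500, 4096, 12) == 2);	/* partial last packet */
	assert(div_round_up_mtu(8192, 4096, 12) == 2);	/* exact multiple */
	assert((4500 >> 12) == 1);			/* rvt_div_mtu() equivalent */
	return 0;
}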
678 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
679 * @timeout - timeout input (0 - 31).
681 * Return a timeout value in jiffies.
683 static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
685 if (timeout > 31)
686 timeout = 31;
688 return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
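The IB local ACK timeout field encodes a delay of 4.096 usec * 2^timeout; the helper builds that by treating 1 << timeout as microseconds, converting to jiffies, and then scaling by 4096/1000 for the 4.096 usec unit. For instance, timeout = 14 encodes roughly 67 ms and the maximum value 31 roughly 2.4 hours. A standalone model of the encoding in microseconds (ignoring the jiffies granularity a real kernel would add):

#include <stdint.h>
#include <stdio.h>

/* Standalone model of the IB local ACK timeout encoding used by
 * rvt_timeout_to_jiffies(): 4.096 usec * 2^timeout, clamped to 31. */
static uint64_t timeout_to_usecs(uint8_t timeout)
{
	if (timeout > 31)
		timeout = 31;
	return ((uint64_t)1 << timeout) * 4096 / 1000;
}

int main(void)
{
	printf("timeout 14 -> %llu usec (~67 ms)\n",
	       (unsigned long long)timeout_to_usecs(14));
	printf("timeout 31 -> %llu usec (~2.4 hours)\n",
	       (unsigned long long)timeout_to_usecs(31));
	return 0;
}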
692 * rvt_lookup_qpn - return the QP with the given QPN
706 qp = rcu_dereference(rvp->qp[qpn]);
708 u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);
710 for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
711 qp = rcu_dereference(qp->next))
712 if (qp->ibqp.qp_num == qpn)
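rvt_lookup_qpn() walks RCU-protected pointers (rcu_dereference()), so it must be called under rcu_read_lock(), and a caller that wants to use the QP after the read-side section should take a reference first. A hedged sketch of that calling pattern, using the rvt_get_qp()/rvt_put_qp() helpers above and a hypothetical driver function name:

/* Illustrative sketch only: look up a QP by number and hold a reference.
 * rdi/rvp are assumed to come from the driver's per-device/per-port state. */
static struct rvt_qp *drv_find_qp(struct rvt_dev_info *rdi,
				  struct rvt_ibport *rvp, u32 qpn)
{
	struct rvt_qp *qp;

	rcu_read_lock();
	qp = rvt_lookup_qpn(rdi, rvp, qpn);
	if (qp)
		rvt_get_qp(qp);		/* keep the QP alive past the RCU section */
	rcu_read_unlock();
	return qp;			/* caller must rvt_put_qp() when done */
}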
719 * rvt_mod_retry_timer - mod a retry timer
720 * @qp - the QP
721 * @shift - timeout shift to wait for multiple packets
726 struct ib_qp *ibqp = &qp->ibqp;
727 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
729 lockdep_assert_held(&qp->s_lock);
730 qp->s_flags |= RVT_S_TIMER;
731 /* 4.096 usec. * (1 << qp->timeout) */
732 mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
733 (qp->timeout_jiffies << shift));
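The timer armed above expires at jiffies + busy_jiffies + (timeout_jiffies << shift), so each increment of shift doubles the per-packet timeout derived from qp->timeout. An illustrative sketch of a caller that gives a request twice the usual timeout (shift = 1):

/* Hypothetical driver helper - a sketch only, not part of this header. */
static void drv_arm_retry_double(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);	/* the helper asserts this too */
	rvt_mod_retry_timer_ext(qp, 1);		/* wait 2 * timeout_jiffies
						 * (plus rdi->busy_jiffies) */
}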
742 * rvt_put_qp_swqe - drop refs held by swqe
751 if (qp->allowed_ops == IB_OPCODE_UD)
752 rdma_destroy_ah_attr(wqe->ud_wr.attr);
756 * rvt_qp_swqe_incr - increment ring index
765 if (++val >= qp->s_size)
773 * rvt_recv_cq - add a new entry to completion queue
786 struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);
793 * rvt_send_cq - add a new entry to completion queue
806 struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);
813 * rvt_qp_complete_swqe - insert send completion
814 * @qp - the qp
815 * @wqe - the send wqe
816 * @opcode - wc operation (driver dependent)
817 * @status - completion status
837 int flags = wqe->wr.send_flags;
844 (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
848 wr_id = wqe->wr.wr_id;
849 byte_len = wqe->length;
852 last = rvt_qp_swqe_incr(qp, qp->s_last);
854 smp_store_release(&qp->s_last, last);
860 .qp = &qp->ibqp,
893 * struct rvt_qp_iter - the iterator for QPs
894 * @qp - the current QP
915 * ib_cq_tail - Return tail index of cq buffer
916 * @send_cq - The cq for send
925 return ibcq_to_rvtcq(send_cq)->ip ?
926 RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
927 ibcq_to_rvtcq(send_cq)->kqueue->tail;
931 * ib_cq_head - Return head index of cq buffer
932 * @send_cq - The cq for send
941 return ibcq_to_rvtcq(send_cq)->ip ?
942 RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
943 ibcq_to_rvtcq(send_cq)->kqueue->head;
947 * rvt_free_rq - free memory allocated for rvt_rq struct
955 kvfree(rq->kwq);
956 rq->kwq = NULL;
957 vfree(rq->wq);
958 rq->wq = NULL;
962 * rvt_to_iport - Get the ibport pointer
969 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
971 return rdi->ports[qp->port_num - 1];
975 * rvt_rc_credit_avail - Check if there are enough RC credits for the request
984 lockdep_assert_held(&qp->s_lock);
985 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
986 rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
989 qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
990 rvp->n_rc_crwaits++;