Lines matching full:wr in drivers/infiniband/sw/siw/siw_verbs.c
674 /* Complete SQ WRs without processing */
675 static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr, in siw_sq_flush_wr() argument
680 while (wr) { in siw_sq_flush_wr()
683 switch (wr->opcode) { in siw_sq_flush_wr()
713 sqe.id = wr->wr_id; in siw_sq_flush_wr()
719 *bad_wr = wr; in siw_sq_flush_wr()
722 wr = wr->next; in siw_sq_flush_wr()
727 /* Complete RQ WRs without processing */
728 static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr, in siw_rq_flush_wr() argument
734 while (wr) { in siw_rq_flush_wr()
735 rqe.id = wr->wr_id; in siw_rq_flush_wr()
739 *bad_wr = wr; in siw_rq_flush_wr()
742 wr = wr->next; in siw_rq_flush_wr()
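
Both flush helpers above share one pattern: once the QP has entered ERROR state, each pending WR on the NULL-terminated chain is completed straight to the CQ with a flush status instead of being executed, and if a completion cannot be queued, *bad_wr records where the walk stopped. Below is a minimal userspace sketch of that walk; the wr type and complete_flushed() are invented stand-ins for illustration, not the driver's real structures:

    #include <stdio.h>
    #include <errno.h>

    /* Invented stand-in for ib_send_wr/ib_recv_wr: only the
     * fields the flush walk needs. */
    struct wr {
        unsigned long wr_id;
        struct wr *next;
    };

    /* Pretend CQ with room for two completions, so the error
     * path is reachable in this demo. */
    static int cq_space = 2;

    static int complete_flushed(unsigned long id)
    {
        if (cq_space-- <= 0)
            return -ENOMEM;  /* CQ full */
        printf("wr_id %lu flushed to CQ\n", id);
        return 0;
    }

    /* Same loop shape as siw_sq_flush_wr()/siw_rq_flush_wr():
     * complete each WR without processing, stop on first failure. */
    static int flush_wr_chain(struct wr *wr, struct wr **bad_wr)
    {
        int rv = 0;

        while (wr) {
            rv = complete_flushed(wr->wr_id);
            if (rv) {
                *bad_wr = wr;  /* first WR not flushed */
                break;
            }
            wr = wr->next;
        }
        return rv;
    }

    int main(void)
    {
        struct wr c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct wr *bad = NULL;

        if (flush_wr_chain(&a, &bad))
            printf("flush stopped at wr_id %lu\n", bad->wr_id);
        return 0;
    }
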
750 * Post a list of S-WRs to an SQ.
753 * @wr: NULL-terminated list of user WRs
754 * @bad_wr: Points to the failing WR in case of synchronous failure.
756 int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr, in siw_post_send() argument
765 if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) { in siw_post_send()
766 siw_dbg_qp(qp, "wr must be empty for user mapped sq\n"); in siw_post_send()
767 *bad_wr = wr; in siw_post_send()
786 rv = siw_sq_flush_wr(qp, wr, bad_wr); in siw_post_send()
790 *bad_wr = wr; in siw_post_send()
798 * Immediately flush this WR to CQ, if QP in siw_post_send()
799 * is in ERROR state. SQ is guaranteed to in siw_post_send()
800 * be empty, so WR completes in-order. in siw_post_send()
804 rv = siw_sq_flush_wr(qp, wr, bad_wr); in siw_post_send()
808 *bad_wr = wr; in siw_post_send()
816 while (wr) { in siw_post_send()
825 if (wr->num_sge > qp->attrs.sq_max_sges) { in siw_post_send()
826 siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge); in siw_post_send()
830 sqe->id = wr->wr_id; in siw_post_send()
832 if ((wr->send_flags & IB_SEND_SIGNALED) || in siw_post_send()
836 if (wr->send_flags & IB_SEND_FENCE) in siw_post_send()
839 switch (wr->opcode) { in siw_post_send()
842 if (wr->send_flags & IB_SEND_SOLICITED) in siw_post_send()
845 if (!(wr->send_flags & IB_SEND_INLINE)) { in siw_post_send()
846 siw_copy_sgl(wr->sg_list, sqe->sge, in siw_post_send()
847 wr->num_sge); in siw_post_send()
848 sqe->num_sge = wr->num_sge; in siw_post_send()
850 rv = siw_copy_inline_sgl(wr, sqe); in siw_post_send()
858 if (wr->opcode == IB_WR_SEND) in siw_post_send()
862 sqe->rkey = wr->ex.invalidate_rkey; in siw_post_send()
875 if (unlikely(wr->num_sge != 1)) { in siw_post_send()
879 siw_copy_sgl(wr->sg_list, &sqe->sge[0], 1); in siw_post_send()
883 sqe->raddr = rdma_wr(wr)->remote_addr; in siw_post_send()
884 sqe->rkey = rdma_wr(wr)->rkey; in siw_post_send()
887 if (wr->opcode == IB_WR_RDMA_READ) in siw_post_send()
894 if (!(wr->send_flags & IB_SEND_INLINE)) { in siw_post_send()
895 siw_copy_sgl(wr->sg_list, &sqe->sge[0], in siw_post_send()
896 wr->num_sge); in siw_post_send()
897 sqe->num_sge = wr->num_sge; in siw_post_send()
899 rv = siw_copy_inline_sgl(wr, sqe); in siw_post_send()
907 sqe->raddr = rdma_wr(wr)->remote_addr; in siw_post_send()
908 sqe->rkey = rdma_wr(wr)->rkey; in siw_post_send()
913 sqe->base_mr = (uintptr_t)reg_wr(wr)->mr; in siw_post_send()
914 sqe->rkey = reg_wr(wr)->key; in siw_post_send()
915 sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK; in siw_post_send()
920 sqe->rkey = wr->ex.invalidate_rkey; in siw_post_send()
925 siw_dbg_qp(qp, "ib wr type %d unsupported\n", in siw_post_send()
926 wr->opcode); in siw_post_send()
942 wr = wr->next; in siw_post_send()
983 *bad_wr = wr; in siw_post_send()
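
The loop above is the heart of siw_post_send(): each ib_send_wr is translated into a driver-private SQE in the software send queue, with the switch on wr->opcode deciding which fields get copied (SGL or inline payload for sends and writes, raddr/rkey for the RDMA opcodes, MR attributes for IB_WR_REG_MR), and *bad_wr reporting the first WR that could not be queued. The following is a reduced, compilable model of that translate-and-enqueue loop, with invented send_wr/sqe types in place of the real ones:

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    enum op { OP_SEND, OP_RDMA_WRITE, OP_RDMA_READ };

    /* Invented, reduced WR and SQE types for illustration. */
    struct send_wr {
        uint64_t wr_id;
        enum op opcode;
        uint64_t remote_addr;  /* RDMA ops only */
        uint32_t rkey;         /* RDMA ops only */
        struct send_wr *next;
    };

    struct sqe {
        uint64_t id;
        enum op opcode;
        uint64_t raddr;
        uint32_t rkey;
    };

    #define SQ_SIZE 8
    static struct sqe sq[SQ_SIZE];
    static unsigned int sq_count;

    static int post_send(struct send_wr *wr, struct send_wr **bad_wr)
    {
        int rv = 0;

        while (wr) {
            struct sqe *sqe;

            if (sq_count == SQ_SIZE) {
                rv = -ENOMEM;  /* SQ full */
                break;
            }
            sqe = &sq[sq_count];
            sqe->id = wr->wr_id;
            sqe->opcode = wr->opcode;

            switch (wr->opcode) {
            case OP_SEND:
                /* real driver: copy SGL or inline payload here */
                break;
            case OP_RDMA_WRITE:
            case OP_RDMA_READ:
                sqe->raddr = wr->remote_addr;
                sqe->rkey = wr->rkey;
                break;
            default:
                rv = -EINVAL;  /* unsupported opcode */
                break;
            }
            if (rv)
                break;
            sq_count++;
            wr = wr->next;
        }
        if (rv)
            *bad_wr = wr;  /* first WR not consumed */
        return rv;
    }

    int main(void)
    {
        struct send_wr w2 = { 2, OP_RDMA_WRITE, 0x1000, 0xabcd, NULL };
        struct send_wr w1 = { 1, OP_SEND, 0, 0, &w2 };
        struct send_wr *bad = NULL;

        if (post_send(&w1, &bad) == 0)
            printf("queued %u sqes\n", sq_count);
        return 0;
    }
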
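The IB_SEND_INLINE branches (the siw_copy_inline_sgl() calls above) deserve a note: instead of recording SGE references, the payload bytes are gathered into the SQE itself, so the sender may reuse its buffers as soon as the post returns. A sketch of such a gather follows; the types and the 128-byte budget are invented for the example:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <stdint.h>

    #define INLINE_MAX 128  /* assumed inline budget */

    struct sge {
        const void *addr;
        uint32_t length;
    };

    /* Gather all SGE payloads into one inline buffer; reject the
     * WR if the total exceeds the budget (cf. siw_copy_inline_sgl()). */
    static int copy_inline(uint8_t *dst, const struct sge *sgl, int num_sge)
    {
        size_t off = 0;

        while (num_sge--) {
            if (off + sgl->length > INLINE_MAX)
                return -EINVAL;
            memcpy(dst + off, sgl->addr, sgl->length);
            off += sgl->length;
            sgl++;
        }
        return (int)off;  /* number of bytes inlined */
    }

    int main(void)
    {
        uint8_t inline_buf[INLINE_MAX];
        struct sge sg[2] = { { "hello ", 6 }, { "world", 5 } };

        printf("inlined %d bytes\n", copy_inline(inline_buf, sg, 2));
        return 0;
    }
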
990 * Post a list of R-WRs to an RQ.
993 * @wr: NULL-terminated list of user WRs
994 * @bad_wr: Points to the failing WR in case of synchronous failure.
996 int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr, in siw_post_receive() argument
1004 *bad_wr = wr; in siw_post_receive()
1009 *bad_wr = wr; in siw_post_receive()
1028 rv = siw_rq_flush_wr(qp, wr, bad_wr); in siw_post_receive()
1032 *bad_wr = wr; in siw_post_receive()
1040 * Immediately flush this WR to CQ, if QP in siw_post_receive()
1041 * is in ERROR state. RQ is guaranteed to in siw_post_receive()
1042 * be empty, so WR completes in-order. in siw_post_receive()
1046 rv = siw_rq_flush_wr(qp, wr, bad_wr); in siw_post_receive()
1050 *bad_wr = wr; in siw_post_receive()
1062 while (wr) { in siw_post_receive()
1071 if (wr->num_sge > qp->attrs.rq_max_sges) { in siw_post_receive()
1072 siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge); in siw_post_receive()
1076 rqe->id = wr->wr_id; in siw_post_receive()
1077 rqe->num_sge = wr->num_sge; in siw_post_receive()
1078 siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge); in siw_post_receive()
1086 wr = wr->next; in siw_post_receive()
1094 *bad_wr = wr; in siw_post_receive()
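
The receive side is simpler: no opcode switch, just a bounds check of wr->num_sge against the queue's SGE limit, after which the WR id and scatter list are copied into the receive queue element. A compact model of that check-and-copy step, again with invented types:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <stdint.h>

    #define RQ_MAX_SGES 4  /* assumed per-QP limit */

    struct sge {
        uint64_t addr;
        uint32_t length;
        uint32_t lkey;
    };

    struct recv_wr {
        uint64_t wr_id;
        struct sge *sg_list;
        int num_sge;
        struct recv_wr *next;
    };

    struct rqe {
        uint64_t id;
        int num_sge;
        struct sge sge[RQ_MAX_SGES];
    };

    /* Per-WR body of a receive post: validate SGE count, then copy
     * (siw_copy_sgl() reduces to a plain array copy here). */
    static int fill_rqe(struct rqe *rqe, const struct recv_wr *wr)
    {
        if (wr->num_sge > RQ_MAX_SGES)
            return -EINVAL;

        rqe->id = wr->wr_id;
        rqe->num_sge = wr->num_sge;
        memcpy(rqe->sge, wr->sg_list, wr->num_sge * sizeof(struct sge));
        return 0;
    }

    int main(void)
    {
        struct sge sg[2] = { { 0x1000, 64, 7 }, { 0x2000, 64, 7 } };
        struct recv_wr wr = { 42, sg, 2, NULL };
        struct rqe rqe;

        if (!fill_rqe(&rqe, &wr))
            printf("rqe %llu with %d sge queued\n",
                   (unsigned long long)rqe.id, rqe.num_sge);
        return 0;
    }
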
1751 * @wr: List of R-WRs
1752 * @bad_wr: Updated to the failing WR if posting fails.
1754 int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr, in siw_post_srq_recv() argument
1774 while (wr) { in siw_post_srq_recv()
1783 if (unlikely(wr->num_sge > srq->max_sge)) { in siw_post_srq_recv()
1785 "[SRQ]: too many sge's: %d\n", wr->num_sge); in siw_post_srq_recv()
1789 rqe->id = wr->wr_id; in siw_post_srq_recv()
1790 rqe->num_sge = wr->num_sge; in siw_post_srq_recv()
1791 siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge); in siw_post_srq_recv()
1799 wr = wr->next; in siw_post_srq_recv()
1805 *bad_wr = wr; in siw_post_srq_recv()
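
Across all three post paths the bad_wr convention is the same: on a synchronous failure, *bad_wr names the first WR that was not consumed, so everything ahead of it was queued and the caller may retry from that point. Below is a caller-side sketch of that retry idiom against a hypothetical post function; the types and the two-slot queue are invented so the partial failure is observable:

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    struct recv_wr {
        uint64_t wr_id;
        struct recv_wr *next;
    };

    static int room = 2;  /* pretend queue capacity */

    /* Hypothetical stand-in for a post verb such as siw_post_srq_recv():
     * accepts WRs until the queue is full, then reports bad_wr. */
    static int post_recv(struct recv_wr *wr, struct recv_wr **bad_wr)
    {
        while (wr) {
            if (room == 0) {
                *bad_wr = wr;  /* first WR not taken */
                return -ENOMEM;
            }
            room--;
            wr = wr->next;
        }
        return 0;
    }

    int main(void)
    {
        struct recv_wr c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct recv_wr *head = &a, *bad = NULL;

        while (post_recv(head, &bad) == -ENOMEM) {
            printf("queue full at wr_id %llu, reaping and retrying\n",
                   (unsigned long long)bad->wr_id);
            room = 2;    /* pretend completions were reaped */
            head = bad;  /* earlier WRs were already queued */
        }
        return 0;
    }
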