Lines Matching full:wr

155 	 * so no need to post a RESET WR for these EQs.  in destroy_qp()
415 const struct ib_send_wr *wr, int max, u32 *plenp) in build_immd() argument
423 for (i = 0; i < wr->num_sge; i++) { in build_immd()
424 if ((plen + wr->sg_list[i].length) > max) in build_immd()
426 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr; in build_immd()
427 plen += wr->sg_list[i].length; in build_immd()
428 rem = wr->sg_list[i].length; in build_immd()
490 const struct ib_send_wr *wr, u8 *len16) in build_rdma_send() argument
496 if (wr->num_sge > T4_MAX_SEND_SGE) in build_rdma_send()
498 switch (wr->opcode) { in build_rdma_send()
500 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send()
509 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send()
515 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); in build_rdma_send()
525 if (wr->num_sge) { in build_rdma_send()
526 if (wr->send_flags & IB_SEND_INLINE) { in build_rdma_send()
527 ret = build_immd(sq, wqe->send.u.immd_src, wr, in build_rdma_send()
537 wr->sg_list, wr->num_sge, &plen); in build_rdma_send()
541 wr->num_sge * sizeof(struct fw_ri_sge); in build_rdma_send()
557 const struct ib_send_wr *wr, u8 *len16) in build_rdma_write() argument
563 if (wr->num_sge > T4_MAX_SEND_SGE) in build_rdma_write()
570 if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) in build_rdma_write()
571 wqe->write.iw_imm_data.ib_imm_data.imm_data32 = wr->ex.imm_data; in build_rdma_write()
574 wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey); in build_rdma_write()
575 wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr); in build_rdma_write()
576 if (wr->num_sge) { in build_rdma_write()
577 if (wr->send_flags & IB_SEND_INLINE) { in build_rdma_write()
578 ret = build_immd(sq, wqe->write.u.immd_src, wr, in build_rdma_write()
588 wr->sg_list, wr->num_sge, &plen); in build_rdma_write()
592 wr->num_sge * sizeof(struct fw_ri_sge); in build_rdma_write()
608 struct ib_send_wr *wr) in build_immd_cmpl() argument
610 memcpy((u8 *)immdp->data, (u8 *)(uintptr_t)wr->sg_list->addr, 16); in build_immd_cmpl()
618 const struct ib_send_wr *wr, u8 *len16) in build_rdma_write_cmpl() argument
625 * fit in one 64B WR slot. This is because the WQE is built in build_rdma_write_cmpl()
627 	 * by the code building sgls. I.e., the "fixed part" of the wr in build_rdma_write_cmpl()
634 wcwr->stag_sink = cpu_to_be32(rdma_wr(wr)->rkey); in build_rdma_write_cmpl()
635 wcwr->to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr); in build_rdma_write_cmpl()
636 if (wr->next->opcode == IB_WR_SEND) in build_rdma_write_cmpl()
639 wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey); in build_rdma_write_cmpl()
644 if (wr->next->send_flags & IB_SEND_INLINE) in build_rdma_write_cmpl()
645 build_immd_cmpl(sq, &wcwr->u_cmpl.immd_src, wr->next); in build_rdma_write_cmpl()
648 &wcwr->u_cmpl.isgl_src, wr->next->sg_list, 1, NULL); in build_rdma_write_cmpl()
652 wcwr->u.isgl_src, wr->sg_list, wr->num_sge, &plen); in build_rdma_write_cmpl()
655 wr->num_sge * sizeof(struct fw_ri_sge); in build_rdma_write_cmpl()
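The sizing arithmetic that recurs above (here and in build_rdma_send()/build_rdma_write()) expresses the WR length in 16-byte units: a fixed header plus one struct fw_ri_sge per SGE, rounded up. A hedged sketch of that computation; DIV_ROUND_UP mirrors the kernel macro, and one 64B WR slot corresponds to a len16 of 4:

#include <stddef.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* length of a WR in 16-byte units, as the hardware expects */
static inline unsigned int wr_len16(size_t fixed_part, unsigned int num_sge,
				    size_t sge_size /* sizeof(struct fw_ri_sge) */)
{
	return DIV_ROUND_UP(fixed_part + num_sge * sge_size, 16);
}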
660 static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr, in build_rdma_read() argument
663 if (wr->num_sge > 1) in build_rdma_read()
665 if (wr->num_sge && wr->sg_list[0].length) { in build_rdma_read()
666 wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey); in build_rdma_read()
667 wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr in build_rdma_read()
669 wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr); in build_rdma_read()
670 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey); in build_rdma_read()
671 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length); in build_rdma_read()
672 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr in build_rdma_read()
674 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr)); in build_rdma_read()
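build_rdma_read() above carries each 64-bit DMA address as two big-endian 32-bit fields. A self-contained illustration of the hi/lo split, with htonl standing in for the kernel's cpu_to_be32:

#include <stdint.h>
#include <arpa/inet.h>

static void split_addr64(uint64_t addr, uint32_t *hi_be, uint32_t *lo_be)
{
	*hi_be = htonl((uint32_t)(addr >> 32));	/* upper 32 bits */
	*lo_be = htonl((uint32_t)addr);		/* lower 32 bits */
}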
690 static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr) in post_write_cmpl() argument
692 bool send_signaled = (wr->next->send_flags & IB_SEND_SIGNALED) || in post_write_cmpl()
694 bool write_signaled = (wr->send_flags & IB_SEND_SIGNALED) || in post_write_cmpl()
704 * 2 slots. The FW WR, however, will be a single uber-WR. in post_write_cmpl()
708 build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16); in post_write_cmpl()
717 swsqe->wr_id = wr->wr_id; in post_write_cmpl()
733 if (wr->next->opcode == IB_WR_SEND) in post_write_cmpl()
741 swsqe->wr_id = wr->next->wr_id; in post_write_cmpl()
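The signaling tests in post_write_cmpl() (and again at source line 1148 in c4iw_post_send) follow the usual verbs rule: a send WR generates a completion if it carries IB_SEND_SIGNALED or if the QP was created with sq_sig_all. As a predicate, a kernel-side sketch:

#include <rdma/ib_verbs.h>

static bool wr_signaled(const struct ib_send_wr *wr, bool sq_sig_all)
{
	return (wr->send_flags & IB_SEND_SIGNALED) || sq_sig_all;
}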
760 const struct ib_recv_wr *wr, u8 *len16) in build_rdma_recv() argument
766 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL); in build_rdma_recv()
770 sizeof(wqe->recv) + wr->num_sge * sizeof(struct fw_ri_sge), 16); in build_rdma_recv()
774 static int build_srq_recv(union t4_recv_wr *wqe, const struct ib_recv_wr *wr, in build_srq_recv() argument
780 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL); in build_srq_recv()
784 wr->num_sge * sizeof(struct fw_ri_sge), 16); in build_srq_recv()
789 const struct ib_reg_wr *wr, struct c4iw_mr *mhp, in build_tpte_memreg() argument
803 FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) | in build_tpte_memreg()
805 FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12)); in build_tpte_memreg()
821 const struct ib_reg_wr *wr, struct c4iw_mr *mhp, in build_memreg() argument
834 wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12; in build_memreg()
836 wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access); in build_memreg()
839 wqe->fr.stag = cpu_to_be32(wr->key); in build_memreg()
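The page-size encoding used by both memory-registration builders above is a shift relative to 4KB: ilog2(page_size) - 12, so 4KB encodes as 0, 64KB as 4, and 2MB as 9. An illustrative helper, assuming page_size is an exact power of two (as ib_mr->page_size must be):

/* __builtin_ctzl() equals ilog2() for exact powers of two */
static inline unsigned int tpt_page_shift(unsigned long page_size)
{
	return __builtin_ctzl(page_size) - 12;
}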
884 static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr, in build_inv_stag() argument
887 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); in build_inv_stag()
980 const struct ib_send_wr *wr) in complete_sq_drain_wr() argument
991 opcode = ib_to_fw_opcode(wr->opcode); in complete_sq_drain_wr()
995 cqe.u.drain_cookie = wr->wr_id; in complete_sq_drain_wr()
1019 const struct ib_send_wr *wr, in complete_sq_drain_wrs() argument
1024 while (wr) { in complete_sq_drain_wrs()
1025 ret = complete_sq_drain_wr(qhp, wr); in complete_sq_drain_wrs()
1027 *bad_wr = wr; in complete_sq_drain_wrs()
1030 wr = wr->next; in complete_sq_drain_wrs()
1036 const struct ib_recv_wr *wr) in complete_rq_drain_wr() argument
1046 cqe.u.drain_cookie = wr->wr_id; in complete_rq_drain_wr()
1069 const struct ib_recv_wr *wr) in complete_rq_drain_wrs() argument
1071 while (wr) { in complete_rq_drain_wrs()
1072 complete_rq_drain_wr(qhp, wr); in complete_rq_drain_wrs()
1073 wr = wr->next; in complete_rq_drain_wrs()
1077 int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, in c4iw_post_send() argument
1102 err = complete_sq_drain_wrs(qhp, wr, bad_wr); in c4iw_post_send()
1108 *bad_wr = wr; in c4iw_post_send()
1113 * Fastpath for NVMe-oF target WRITE + SEND_WITH_INV wr chain which is in c4iw_post_send()
1117 * request, then build and post the write_cmpl WR. If any of the tests in c4iw_post_send()
1124 wr && wr->next && !wr->next->next && in c4iw_post_send()
1125 wr->opcode == IB_WR_RDMA_WRITE && in c4iw_post_send()
1126 wr->sg_list[0].length && wr->num_sge <= T4_WRITE_CMPL_MAX_SGL && in c4iw_post_send()
1127 (wr->next->opcode == IB_WR_SEND || in c4iw_post_send()
1128 wr->next->opcode == IB_WR_SEND_WITH_INV) && in c4iw_post_send()
1129 wr->next->sg_list[0].length == T4_WRITE_CMPL_MAX_CQE && in c4iw_post_send()
1130 wr->next->num_sge == 1 && num_wrs >= 2) { in c4iw_post_send()
1131 post_write_cmpl(qhp, wr); in c4iw_post_send()
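The guard at source lines 1124-1130 is a chain of tests that must all hold before the post_write_cmpl() fastpath is taken. Restated as a predicate for readability (a sketch covering only the tests visible above, and assuming the driver's own headers for the T4_* constants): exactly two chained WRs, an RDMA WRITE followed by a SEND or SEND_WITH_INV carrying a single CQE-sized SGE, with at least two SQ slots free.

static bool is_write_cmpl_chain(const struct ib_send_wr *wr, u16 num_wrs)
{
	return wr && wr->next && !wr->next->next &&	/* exactly two WRs */
	       wr->opcode == IB_WR_RDMA_WRITE &&
	       wr->sg_list[0].length &&
	       wr->num_sge <= T4_WRITE_CMPL_MAX_SGL &&
	       (wr->next->opcode == IB_WR_SEND ||
		wr->next->opcode == IB_WR_SEND_WITH_INV) &&
	       wr->next->sg_list[0].length == T4_WRITE_CMPL_MAX_CQE &&
	       wr->next->num_sge == 1 &&
	       num_wrs >= 2;	/* two SQ slots available */
}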
1136 while (wr) { in c4iw_post_send()
1139 *bad_wr = wr; in c4iw_post_send()
1146 if (wr->send_flags & IB_SEND_SOLICITED) in c4iw_post_send()
1148 if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all) in c4iw_post_send()
1151 switch (wr->opcode) { in c4iw_post_send()
1154 if (wr->send_flags & IB_SEND_FENCE) in c4iw_post_send()
1157 if (wr->opcode == IB_WR_SEND) in c4iw_post_send()
1161 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); in c4iw_post_send()
1173 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); in c4iw_post_send()
1179 if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) { in c4iw_post_send()
1180 c4iw_invalidate_mr(rhp, wr->sg_list[0].lkey); in c4iw_post_send()
1185 err = build_rdma_read(wqe, wr, &len16); in c4iw_post_send()
1188 swsqe->read_len = wr->sg_list[0].length; in c4iw_post_send()
1193 struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr); in c4iw_post_send()
1199 build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr), in c4iw_post_send()
1203 err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), in c4iw_post_send()
1213 if (wr->send_flags & IB_SEND_FENCE) in c4iw_post_send()
1217 err = build_inv_stag(wqe, wr, &len16); in c4iw_post_send()
1218 c4iw_invalidate_mr(rhp, wr->ex.invalidate_rkey); in c4iw_post_send()
1222 wr->opcode); in c4iw_post_send()
1226 *bad_wr = wr; in c4iw_post_send()
1231 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) || in c4iw_post_send()
1234 swsqe->wr_id = wr->wr_id; in c4iw_post_send()
1244 (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, in c4iw_post_send()
1246 wr = wr->next; in c4iw_post_send()
1261 int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr, in c4iw_post_receive() argument
1281 complete_rq_drain_wrs(qhp, wr); in c4iw_post_receive()
1287 *bad_wr = wr; in c4iw_post_receive()
1290 while (wr) { in c4iw_post_receive()
1291 if (wr->num_sge > T4_MAX_RECV_SGE) { in c4iw_post_receive()
1293 *bad_wr = wr; in c4iw_post_receive()
1300 err = build_rdma_recv(qhp, wqe, wr, &len16); in c4iw_post_receive()
1304 *bad_wr = wr; in c4iw_post_receive()
1308 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; in c4iw_post_receive()
1325 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx); in c4iw_post_receive()
1328 wr = wr->next; in c4iw_post_receive()
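The receive path above records each wr_id in a software shadow ring indexed by the producer index, so the completion path can map a hardware CQE back to the consumer's cookie. A generic sketch of that bookkeeping with illustrative types; the wrap handling here is an assumption, not the driver's code:

#include <stdint.h>

struct sw_rq_ent { uint64_t wr_id; };

struct rq_ring {
	struct sw_rq_ent *sw_rq;	/* shadow ring, one entry per WQE */
	uint16_t pidx;			/* producer index */
	uint16_t size;			/* number of ring entries */
};

static void record_recv_wr(struct rq_ring *rq, uint64_t wr_id)
{
	rq->sw_rq[rq->pidx].wr_id = wr_id;
	if (++rq->pidx == rq->size)	/* assumed wrap of the producer index */
		rq->pidx = 0;
}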
1357 int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, in c4iw_post_srq_recv() argument
1375 while (wr) { in c4iw_post_srq_recv()
1376 if (wr->num_sge > T4_MAX_RECV_SGE) { in c4iw_post_srq_recv()
1378 *bad_wr = wr; in c4iw_post_srq_recv()
1383 err = build_srq_recv(wqe, wr, &len16); in c4iw_post_srq_recv()
1387 *bad_wr = wr; in c4iw_post_srq_recv()
1402 defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16); in c4iw_post_srq_recv()
1404 srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id; in c4iw_post_srq_recv()
1411 (unsigned long long)wr->wr_id); in c4iw_post_srq_recv()
1415 wr = wr->next; in c4iw_post_srq_recv()