Lines matching `full:wr` in drivers/infiniband/hw/mthca/mthca_qp.c (Linux mthca InfiniBand driver)

1501 			    const struct ib_ud_wr *wr,  in build_mlx_header()  argument
1511 mthca_ah_grh_present(to_mah(wr->ah)), 0, 0, 0, in build_mlx_header()
1514 err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header); in build_mlx_header()
1525 switch (wr->wr.opcode) { in build_mlx_header()
1533 sqp->ud_header.immediate_data = wr->wr.ex.imm_data; in build_mlx_header()
1542 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); in build_mlx_header()
1547 ib_get_cached_pkey(&dev->ib_dev, qp->port, wr->pkey_index, in build_mlx_header()
1550 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); in build_mlx_header()
1552 sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ? in build_mlx_header()
1553 sqp->qkey : wr->remote_qkey); in build_mlx_header()
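
Note: the qkey selection on source lines 1552 and 1553 mirrors the InfiniBand rule that a remote qkey with its most significant bit set means "use the sending QP's own qkey". A minimal sketch of just that selection, with hypothetical names (the driver also converts the result with cpu_to_be32()):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the decision made in build_mlx_header():
 * if the high bit of the work request's remote_qkey is set, the QP's
 * own qkey is placed in the DETH instead.
 */
static uint32_t select_deth_qkey(uint32_t qp_qkey, uint32_t remote_qkey)
{
	return (remote_qkey & 0x80000000u) ? qp_qkey : remote_qkey;
}

int main(void)
{
	/* High bit set: fall back to the QP's configured qkey. */
	printf("0x%" PRIx32 "\n", select_deth_qkey(0x12345678u, 0x80000000u));
	/* High bit clear: use the qkey supplied in the work request. */
	printf("0x%" PRIx32 "\n", select_deth_qkey(0x12345678u, 0x00000011u));
	return 0;
}
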
1595 const struct ib_atomic_wr *wr) in set_atomic_seg() argument
1597 if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { in set_atomic_seg()
1598 aseg->swap_add = cpu_to_be64(wr->swap); in set_atomic_seg()
1599 aseg->compare = cpu_to_be64(wr->compare_add); in set_atomic_seg()
1601 aseg->swap_add = cpu_to_be64(wr->compare_add); in set_atomic_seg()
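
Note: source lines 1597 to 1601 show how set_atomic_seg() picks operands: compare-and-swap uses both wr->swap and wr->compare_add, while fetch-and-add uses wr->compare_add alone as the addend. A simplified sketch under that reading, with hypothetical struct layouts (the real segment uses big-endian __be64 fields and cpu_to_be64()):

#include <stdint.h>

struct atomic_seg_sketch {
	uint64_t swap_add;
	uint64_t compare;
};

struct atomic_wr_sketch {
	int      cmp_and_swp;   /* nonzero for IB_WR_ATOMIC_CMP_AND_SWP */
	uint64_t swap;
	uint64_t compare_add;
};

void fill_atomic_seg(struct atomic_seg_sketch *aseg,
		     const struct atomic_wr_sketch *wr)
{
	if (wr->cmp_and_swp) {
		/* Compare-and-swap carries both operands. */
		aseg->swap_add = wr->swap;
		aseg->compare  = wr->compare_add;
	} else {
		/* Fetch-and-add: compare_add is the addend, compare is unused. */
		aseg->swap_add = wr->compare_add;
		aseg->compare  = 0;
	}
}
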
1608 const struct ib_ud_wr *wr) in set_tavor_ud_seg() argument
1610 useg->lkey = cpu_to_be32(to_mah(wr->ah)->key); in set_tavor_ud_seg()
1611 useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma); in set_tavor_ud_seg()
1612 useg->dqpn = cpu_to_be32(wr->remote_qpn); in set_tavor_ud_seg()
1613 useg->qkey = cpu_to_be32(wr->remote_qkey); in set_tavor_ud_seg()
1618 const struct ib_ud_wr *wr) in set_arbel_ud_seg() argument
1620 memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE); in set_arbel_ud_seg()
1621 useg->dqpn = cpu_to_be32(wr->remote_qpn); in set_arbel_ud_seg()
1622 useg->qkey = cpu_to_be32(wr->remote_qkey); in set_arbel_ud_seg()
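
Note: source lines 1610 to 1622 contrast the two UD datagram segment formats: the Tavor WQE references the address vector by lkey and DMA address, while the Arbel (mem-free) WQE embeds a copy of the AV, and both carry the destination QPN and qkey. A simplified, hypothetical illustration of the two setters (layouts and the AV size are illustrative, not the driver's):

#include <stdint.h>
#include <string.h>

#define AV_SIZE_SKETCH 32   /* stand-in for MTHCA_AV_SIZE, value illustrative */

struct tavor_ud_seg_sketch {
	uint32_t lkey;        /* memory key covering the address vector */
	uint64_t av_addr;     /* DMA address of the address vector      */
	uint32_t dqpn;        /* destination QP number                  */
	uint32_t qkey;
};

struct arbel_ud_seg_sketch {
	uint8_t  av[AV_SIZE_SKETCH];   /* address vector copied inline  */
	uint32_t dqpn;
	uint32_t qkey;
};

/* Stand-in for the fields pulled from ib_ud_wr and to_mah(wr->ah). */
struct ud_wr_sketch {
	const uint8_t *av;
	uint32_t       av_key;
	uint64_t       av_dma;
	uint32_t       remote_qpn;
	uint32_t       remote_qkey;
};

void fill_tavor_ud_seg(struct tavor_ud_seg_sketch *u, const struct ud_wr_sketch *wr)
{
	u->lkey    = wr->av_key;     /* Tavor points at the AV by key + address */
	u->av_addr = wr->av_dma;
	u->dqpn    = wr->remote_qpn;
	u->qkey    = wr->remote_qkey;
}

void fill_arbel_ud_seg(struct arbel_ud_seg_sketch *u, const struct ud_wr_sketch *wr)
{
	memcpy(u->av, wr->av, AV_SIZE_SKETCH);   /* Arbel embeds the AV itself */
	u->dqpn = wr->remote_qpn;
	u->qkey = wr->remote_qkey;
}
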
1625 int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, in mthca_tavor_post_send() argument
1655 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_tavor_post_send()
1662 *bad_wr = wr; in mthca_tavor_post_send()
1673 ((wr->send_flags & IB_SEND_SIGNALED) ? in mthca_tavor_post_send()
1675 ((wr->send_flags & IB_SEND_SOLICITED) ? in mthca_tavor_post_send()
1678 if (wr->opcode == IB_WR_SEND_WITH_IMM || in mthca_tavor_post_send()
1679 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) in mthca_tavor_post_send()
1680 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; in mthca_tavor_post_send()
1687 switch (wr->opcode) { in mthca_tavor_post_send()
1690 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, in mthca_tavor_post_send()
1691 atomic_wr(wr)->rkey); in mthca_tavor_post_send()
1694 set_atomic_seg(wqe, atomic_wr(wr)); in mthca_tavor_post_send()
1703 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, in mthca_tavor_post_send()
1704 rdma_wr(wr)->rkey); in mthca_tavor_post_send()
1717 switch (wr->opcode) { in mthca_tavor_post_send()
1720 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, in mthca_tavor_post_send()
1721 rdma_wr(wr)->rkey); in mthca_tavor_post_send()
1734 set_tavor_ud_seg(wqe, ud_wr(wr)); in mthca_tavor_post_send()
1741 dev, qp, ind, ud_wr(wr), in mthca_tavor_post_send()
1744 *bad_wr = wr; in mthca_tavor_post_send()
1752 if (wr->num_sge > qp->sq.max_gs) { in mthca_tavor_post_send()
1755 *bad_wr = wr; in mthca_tavor_post_send()
1759 for (i = 0; i < wr->num_sge; ++i) { in mthca_tavor_post_send()
1760 mthca_set_data_seg(wqe, wr->sg_list + i); in mthca_tavor_post_send()
1774 qp->wrid[ind + qp->rq.max] = wr->wr_id; in mthca_tavor_post_send()
1776 if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { in mthca_tavor_post_send()
1779 *bad_wr = wr; in mthca_tavor_post_send()
1786 mthca_opcode[wr->opcode]); in mthca_tavor_post_send()
1790 ((wr->send_flags & IB_SEND_FENCE) ? in mthca_tavor_post_send()
1795 op0 = mthca_opcode[wr->opcode]; in mthca_tavor_post_send()
1796 f0 = wr->send_flags & IB_SEND_FENCE ? in mthca_tavor_post_send()
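
Note: taken together, source lines 1655 to 1796 outline the send-path verb: walk the wr->next chain, reject requests that overflow the queue or exceed sq.max_gs, build the WQE segments, record wr_id for completion handling, and on any failure point *bad_wr at the offending request and stop. A stripped-down sketch of that control flow with hypothetical types (doorbell handling and segment construction elided):

#include <errno.h>
#include <stdint.h>

struct send_wr_sketch {
	struct send_wr_sketch *next;
	uint64_t               wr_id;
	int                    num_sge;
};

struct send_queue_sketch {
	int       max_gs;   /* max scatter/gather entries per WQE */
	int       free;     /* free WQE slots                     */
	int       head;     /* next slot to use                   */
	uint64_t *wrid;     /* wr_id per slot, read at completion */
};

/*
 * Shape of the posting loop seen in mthca_tavor_post_send(): stop at the
 * first failure and report the failing request through *bad_wr so the
 * caller knows how far posting got.
 */
int post_send_sketch(struct send_queue_sketch *sq, struct send_wr_sketch *wr,
		     struct send_wr_sketch **bad_wr)
{
	int nreq;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (!sq->free) {                    /* queue overflow */
			*bad_wr = wr;
			return -ENOMEM;
		}
		if (wr->num_sge > sq->max_gs) {     /* too many SGEs  */
			*bad_wr = wr;
			return -EINVAL;
		}

		/* ... build next / raddr / atomic / UD / data segments ... */

		sq->wrid[sq->head] = wr->wr_id;     /* matched up on completion */
		sq->head++;
		sq->free--;
	}

	/* The driver rings a doorbell here when nreq is nonzero. */
	return 0;
}
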
1823 int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr, in mthca_tavor_post_receive() argument
1851 for (nreq = 0; wr; wr = wr->next) { in mthca_tavor_post_receive()
1858 *bad_wr = wr; in mthca_tavor_post_receive()
1873 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mthca_tavor_post_receive()
1875 *bad_wr = wr; in mthca_tavor_post_receive()
1879 for (i = 0; i < wr->num_sge; ++i) { in mthca_tavor_post_receive()
1880 mthca_set_data_seg(wqe, wr->sg_list + i); in mthca_tavor_post_receive()
1885 qp->wrid[ind] = wr->wr_id; in mthca_tavor_post_receive()
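
Note: the receive path (source lines 1851 to 1885) validates num_sge against rq.max_gs and then calls mthca_set_data_seg() once per scatter entry. That helper is not part of this listing; it is assumed here to copy the SGE's length, lkey and address into the WQE's data segment. A sketch under that assumption, with simplified host-order fields:

#include <stdint.h>

struct data_seg_sketch {
	uint32_t byte_count;
	uint32_t lkey;
	uint64_t addr;
};

struct sge_sketch {        /* mirrors struct ib_sge */
	uint64_t addr;
	uint32_t length;
	uint32_t lkey;
};

/* Assumed behaviour of mthca_set_data_seg(): copy one SGE into the WQE. */
void fill_data_seg(struct data_seg_sketch *dseg, const struct sge_sketch *sg)
{
	dseg->byte_count = sg->length;
	dseg->lkey       = sg->lkey;
	dseg->addr       = sg->addr;
}
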
1928 int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, in mthca_arbel_post_send() argument
1959 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_arbel_post_send()
1992 *bad_wr = wr; in mthca_arbel_post_send()
2001 ((wr->send_flags & IB_SEND_SIGNALED) ? in mthca_arbel_post_send()
2003 ((wr->send_flags & IB_SEND_SOLICITED) ? in mthca_arbel_post_send()
2005 ((wr->send_flags & IB_SEND_IP_CSUM) ? in mthca_arbel_post_send()
2008 if (wr->opcode == IB_WR_SEND_WITH_IMM || in mthca_arbel_post_send()
2009 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) in mthca_arbel_post_send()
2010 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; in mthca_arbel_post_send()
2017 switch (wr->opcode) { in mthca_arbel_post_send()
2020 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, in mthca_arbel_post_send()
2021 atomic_wr(wr)->rkey); in mthca_arbel_post_send()
2024 set_atomic_seg(wqe, atomic_wr(wr)); in mthca_arbel_post_send()
2033 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, in mthca_arbel_post_send()
2034 rdma_wr(wr)->rkey); in mthca_arbel_post_send()
2047 switch (wr->opcode) { in mthca_arbel_post_send()
2050 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, in mthca_arbel_post_send()
2051 rdma_wr(wr)->rkey); in mthca_arbel_post_send()
2064 set_arbel_ud_seg(wqe, ud_wr(wr)); in mthca_arbel_post_send()
2071 dev, qp, ind, ud_wr(wr), in mthca_arbel_post_send()
2074 *bad_wr = wr; in mthca_arbel_post_send()
2082 if (wr->num_sge > qp->sq.max_gs) { in mthca_arbel_post_send()
2085 *bad_wr = wr; in mthca_arbel_post_send()
2089 for (i = 0; i < wr->num_sge; ++i) { in mthca_arbel_post_send()
2090 mthca_set_data_seg(wqe, wr->sg_list + i); in mthca_arbel_post_send()
2104 qp->wrid[ind + qp->rq.max] = wr->wr_id; in mthca_arbel_post_send()
2106 if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { in mthca_arbel_post_send()
2109 *bad_wr = wr; in mthca_arbel_post_send()
2116 mthca_opcode[wr->opcode]); in mthca_arbel_post_send()
2120 ((wr->send_flags & IB_SEND_FENCE) ? in mthca_arbel_post_send()
2125 op0 = mthca_opcode[wr->opcode]; in mthca_arbel_post_send()
2126 f0 = wr->send_flags & IB_SEND_FENCE ? in mthca_arbel_post_send()
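
Note: relative to the Tavor path, the Arbel send path (source lines 2001 to 2005) additionally maps IB_SEND_IP_CSUM into the next-segment flags, alongside IB_SEND_SIGNALED and IB_SEND_SOLICITED; IB_SEND_FENCE is handled separately when the first WQE's flags are chosen (source lines 2120 to 2126). A small sketch of that flag translation with illustrative bit values (the driver's MTHCA_NEXT_* and IB_SEND_* constants differ):

#include <stdint.h>

/* Hypothetical WQE control bits. */
#define WQE_CQ_UPDATE  (1u << 0)   /* request a completion (signaled send) */
#define WQE_SOLICIT    (1u << 1)   /* solicited event at the responder     */
#define WQE_IP_CSUM    (1u << 2)   /* offload IP and TCP/UDP checksums     */

/* Subset of send flags used above; bit positions are illustrative. */
#define SEND_SIGNALED  (1u << 0)
#define SEND_SOLICITED (1u << 1)
#define SEND_IP_CSUM   (1u << 2)

/* Translate work-request flags into next-segment control bits. */
uint32_t arbel_next_seg_flags_sketch(uint32_t send_flags)
{
	return ((send_flags & SEND_SIGNALED)  ? WQE_CQ_UPDATE : 0) |
	       ((send_flags & SEND_SOLICITED) ? WQE_SOLICIT   : 0) |
	       ((send_flags & SEND_IP_CSUM)   ? WQE_IP_CSUM   : 0);
}
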
2162 int mthca_arbel_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr, in mthca_arbel_post_receive() argument
2180 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_arbel_post_receive()
2187 *bad_wr = wr; in mthca_arbel_post_receive()
2197 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mthca_arbel_post_receive()
2199 *bad_wr = wr; in mthca_arbel_post_receive()
2203 for (i = 0; i < wr->num_sge; ++i) { in mthca_arbel_post_receive()
2204 mthca_set_data_seg(wqe, wr->sg_list + i); in mthca_arbel_post_receive()
2211 qp->wrid[ind] = wr->wr_id; in mthca_arbel_post_receive()