
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
34 cur = wq->head - wq->tail; in mlx5r_wq_overflow()
35 if (likely(cur + nreq < wq->max_post)) in mlx5r_wq_overflow()
39 spin_lock(&cq->lock); in mlx5r_wq_overflow()
40 cur = wq->head - wq->tail; in mlx5r_wq_overflow()
41 spin_unlock(&cq->lock); in mlx5r_wq_overflow()
43 return cur + nreq >= wq->max_post; in mlx5r_wq_overflow()
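
The overflow test above relies on unsigned wraparound: head and tail are free-running counters, so head - tail gives the number of outstanding WQEs even after either counter wraps, and the locked re-read handles the contended case. A minimal standalone sketch of the same arithmetic follows; the toy_* names are hypothetical stand-ins, not the driver's structures.

/* Standalone sketch, not driver code: occupancy check with free-running
 * unsigned counters, mirroring mlx5r_wq_overflow() above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_wq {               /* hypothetical stand-in for the SQ/RQ state */
	uint32_t head;        /* producer counter, never reset */
	uint32_t tail;        /* consumer counter, never reset */
	uint32_t max_post;    /* ring capacity in WQEs */
};

static bool toy_wq_overflow(const struct toy_wq *wq, unsigned int nreq)
{
	/* Unsigned subtraction is well defined across wraparound, so this
	 * is the number of WQEs currently outstanding. */
	uint32_t cur = wq->head - wq->tail;

	return cur + nreq >= wq->max_post;
}

int main(void)
{
	/* Counters close to UINT32_MAX still give the right occupancy. */
	struct toy_wq wq = { .head = 5, .tail = 0xfffffffeu, .max_post = 16 };

	printf("outstanding=%u overflow=%d\n",
	       wq.head - wq.tail, toy_wq_overflow(&wq, 8));
	return 0;
}
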
49 rseg->raddr = cpu_to_be64(remote_addr); in set_raddr_seg()
50 rseg->rkey = cpu_to_be32(rkey); in set_raddr_seg()
51 rseg->reserved = 0; in set_raddr_seg()
61 if (wr->send_flags & IB_SEND_IP_CSUM) in set_eth_seg()
62 eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | in set_eth_seg()
65 if (wr->opcode == IB_WR_LSO) { in set_eth_seg()
68 void *pdata = ud_wr->header; in set_eth_seg()
71 left = ud_wr->hlen; in set_eth_seg()
72 eseg->mss = cpu_to_be16(ud_wr->mss); in set_eth_seg()
73 eseg->inline_hdr.sz = cpu_to_be16(left); in set_eth_seg()
79 copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start, in set_eth_seg()
81 memcpy(eseg->inline_hdr.data, pdata, copysz); in set_eth_seg()
82 stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) - in set_eth_seg()
83 sizeof(eseg->inline_hdr.start) + copysz, 16); in set_eth_seg()
88 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_eth_seg()
89 left -= copysz; in set_eth_seg()
91 mlx5r_memcpy_send_wqe(&qp->sq, cur_edge, seg, size, in set_eth_seg()
105 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av)); in set_datagram_seg()
106 dseg->av.dqp_dct = in set_datagram_seg()
107 cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV); in set_datagram_seg()
108 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey); in set_datagram_seg()
113 dseg->byte_count = cpu_to_be32(sg->length); in set_data_ptr_seg()
114 dseg->lkey = cpu_to_be32(sg->lkey); in set_data_ptr_seg()
115 dseg->addr = cpu_to_be64(sg->addr); in set_data_ptr_seg()
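
set_data_ptr_seg() packs one scatter/gather entry into a data segment with every field converted to big endian for the hardware. A hedged userspace sketch of the same packing follows; the toy_* structures are illustrative, not the real mlx5 layouts, and htobe32()/htobe64() stand in for cpu_to_be32()/cpu_to_be64().

/* Standalone sketch: packing an SGE into a big-endian data segment. */
#include <stdint.h>
#include <stdio.h>
#include <endian.h>

struct toy_sge {                  /* like an ib_sge */
	uint64_t addr;
	uint32_t length;
	uint32_t lkey;
};

struct toy_data_seg {             /* like a wqe data segment */
	uint32_t byte_count;      /* big-endian on the wire */
	uint32_t lkey;
	uint64_t addr;
};

static void toy_set_data_ptr_seg(struct toy_data_seg *dseg,
				 const struct toy_sge *sg)
{
	dseg->byte_count = htobe32(sg->length);
	dseg->lkey       = htobe32(sg->lkey);
	dseg->addr       = htobe64(sg->addr);
}

int main(void)
{
	struct toy_sge sge = { .addr = 0x1000, .length = 4096, .lkey = 0x42 };
	struct toy_data_seg dseg;

	toy_set_data_ptr_seg(&dseg, &sge);
	printf("byte_count on the wire: 0x%08x\n", dseg.byte_count);
	return 0;
}
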
164 int size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size; in set_reg_umr_seg()
168 umr->flags = flags; in set_reg_umr_seg()
169 umr->xlt_octowords = cpu_to_be16(mlx5r_umr_get_xlt_octo(size)); in set_reg_umr_seg()
170 umr->mkey_mask = frwr_mkey_mask(atomic); in set_reg_umr_seg()
176 umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); in set_linv_umr_seg()
177 umr->flags = MLX5_UMR_INLINE; in set_linv_umr_seg()
193 int ndescs = ALIGN(mr->mmkey.ndescs + mr->meta_ndescs, 8) >> 1; in set_reg_mkey_seg()
197 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT) in set_reg_mkey_seg()
198 seg->log2_page_size = ilog2(mr->ibmr.page_size); in set_reg_mkey_seg()
199 else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) in set_reg_mkey_seg()
203 seg->flags = get_umr_flags(access) | mr->access_mode; in set_reg_mkey_seg()
204 seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00); in set_reg_mkey_seg()
205 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); in set_reg_mkey_seg()
206 seg->start_addr = cpu_to_be64(mr->ibmr.iova); in set_reg_mkey_seg()
207 seg->len = cpu_to_be64(mr->ibmr.length); in set_reg_mkey_seg()
208 seg->xlt_oct_size = cpu_to_be32(ndescs); in set_reg_mkey_seg()
214 seg->status = MLX5_MKEY_STATUS_FREE; in set_linv_mkey_seg()
221 int bcount = mr->desc_size * (mr->mmkey.ndescs + mr->meta_ndescs); in set_reg_data_seg()
223 dseg->addr = cpu_to_be64(mr->desc_map); in set_reg_data_seg()
224 dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64)); in set_reg_data_seg()
225 dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey); in set_reg_data_seg()
230 switch (wr->opcode) { in send_ieth()
233 return wr->ex.imm_data; in send_ieth()
236 return cpu_to_be32(wr->ex.invalidate_rkey); in send_ieth()
257 return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4); in wq_sig()
272 for (i = 0; i < wr->num_sge; i++) { in set_data_inl_seg()
273 size_t len = wr->sg_list[i].length; in set_data_inl_seg()
274 void *addr = (void *)(unsigned long)(wr->sg_list[i].addr); in set_data_inl_seg()
278 if (unlikely(inl > qp->max_inline_data)) in set_data_inl_seg()
279 return -ENOMEM; in set_data_inl_seg()
285 handle_post_send_edge(&qp->sq, wqe, in set_data_inl_seg()
286 *wqe_sz + (offset >> 4), in set_data_inl_seg()
289 leftlen = *cur_edge - *wqe; in set_data_inl_seg()
293 len -= copysz; in set_data_inl_seg()
300 seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG); in set_data_inl_seg()
302 *wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16; in set_data_inl_seg()
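
WQE sizes in this file are tracked in 16-byte units; an inline segment contributes its 4-byte byte_count header plus the copied payload, rounded up to the next 16-byte boundary, which is what the final ALIGN above computes. A small sketch of that accounting, with a local TOY_ALIGN macro standing in for the kernel's ALIGN():

/* Standalone sketch: how many 16-byte units an inline segment adds. */
#include <stdio.h>

#define TOY_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static unsigned int inline_seg_units(unsigned int inl_bytes)
{
	/* 4 bytes of byte_count header precede the inline payload. */
	return TOY_ALIGN(inl_bytes + 4, 16) / 16;
}

int main(void)
{
	/* 28 payload bytes + 4-byte header = 32 bytes -> 2 units. */
	printf("%u\n", inline_seg_units(28));
	return 0;
}
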
333 inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID | in mlx5_fill_inl_bsf()
335 inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag); in mlx5_fill_inl_bsf()
336 inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag); in mlx5_fill_inl_bsf()
338 inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK; in mlx5_fill_inl_bsf()
339 inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ? in mlx5_fill_inl_bsf()
342 if (domain->sig.dif.ref_remap) in mlx5_fill_inl_bsf()
343 inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG; in mlx5_fill_inl_bsf()
345 if (domain->sig.dif.app_escape) { in mlx5_fill_inl_bsf()
346 if (domain->sig.dif.ref_escape) in mlx5_fill_inl_bsf()
347 inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE; in mlx5_fill_inl_bsf()
349 inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE; in mlx5_fill_inl_bsf()
352 inl->dif_app_bitmask_check = in mlx5_fill_inl_bsf()
353 cpu_to_be16(domain->sig.dif.apptag_check_mask); in mlx5_fill_inl_bsf()
360 struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig; in mlx5_set_bsf()
361 struct mlx5_bsf_basic *basic = &bsf->basic; in mlx5_set_bsf()
362 struct ib_sig_domain *mem = &sig_attrs->mem; in mlx5_set_bsf()
363 struct ib_sig_domain *wire = &sig_attrs->wire; in mlx5_set_bsf()
368 basic->bsf_size_sbs = 1 << 7; in mlx5_set_bsf()
370 basic->check_byte_mask = sig_attrs->check_mask; in mlx5_set_bsf()
371 basic->raw_data_size = cpu_to_be32(data_size); in mlx5_set_bsf()
374 switch (sig_attrs->mem.sig_type) { in mlx5_set_bsf()
378 basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval); in mlx5_set_bsf()
379 basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx); in mlx5_set_bsf()
380 mlx5_fill_inl_bsf(mem, &bsf->m_inl); in mlx5_set_bsf()
383 return -EINVAL; in mlx5_set_bsf()
386 /* Wire domain */ in mlx5_set_bsf()
387 switch (sig_attrs->wire.sig_type) { in mlx5_set_bsf()
391 if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval && in mlx5_set_bsf()
392 mem->sig_type == wire->sig_type) { in mlx5_set_bsf()
394 basic->bsf_size_sbs |= 1 << 4; in mlx5_set_bsf()
395 if (mem->sig.dif.bg_type == wire->sig.dif.bg_type) in mlx5_set_bsf()
396 basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK; in mlx5_set_bsf()
397 if (mem->sig.dif.app_tag == wire->sig.dif.app_tag) in mlx5_set_bsf()
398 basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK; in mlx5_set_bsf()
399 if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag) in mlx5_set_bsf()
400 basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK; in mlx5_set_bsf()
402 basic->wire.bs_selector = in mlx5_set_bsf()
403 bs_selector(wire->sig.dif.pi_interval); in mlx5_set_bsf()
405 basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx); in mlx5_set_bsf()
406 mlx5_fill_inl_bsf(wire, &bsf->w_inl); in mlx5_set_bsf()
409 return -EINVAL; in mlx5_set_bsf()
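
In the wire-domain switch above, when both domains use the same protection interval and signature type the device may copy the T10-DIF guard, application-tag and reference-tag fields from the memory domain instead of regenerating them, and copy_byte_mask selects which fields. A sketch of that selection under the same assumption follows; the TOY_CPY_* values are made up, not the MLX5_CPY_* definitions, and toy_dif is only a subset of the verbs DIF domain.

/* Standalone sketch: choose which T10-DIF fields can be copied from the
 * memory domain to the wire domain (mask values are invented). */
#include <stdint.h>
#include <stdio.h>

#define TOY_CPY_GRD 0x1   /* block guard */
#define TOY_CPY_APP 0x2   /* application tag */
#define TOY_CPY_REF 0x4   /* reference tag */

struct toy_dif {
	unsigned int pi_interval;   /* protection interval (block size) */
	unsigned int bg_type;       /* guard type */
	uint16_t app_tag;
	uint32_t ref_tag;
};

static uint8_t toy_copy_byte_mask(const struct toy_dif *mem,
				  const struct toy_dif *wire)
{
	uint8_t mask = 0;

	if (mem->bg_type == wire->bg_type)
		mask |= TOY_CPY_GRD;
	if (mem->app_tag == wire->app_tag)
		mask |= TOY_CPY_APP;
	if (mem->ref_tag == wire->ref_tag)
		mask |= TOY_CPY_REF;
	return mask;
}

int main(void)
{
	struct toy_dif mem  = { 512, 1, 0xabcd, 100 };
	struct toy_dif wire = { 512, 1, 0xabcd, 200 };

	/* Guard and app tag match, ref tag differs -> mask 0x3. */
	printf("copy mask = 0x%x\n", toy_copy_byte_mask(&mem, &wire));
	return 0;
}
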
433 struct mlx5_ib_mr *pi_mr = mr->pi_mr; in set_sig_data_segment()
435 data_len = pi_mr->data_length; in set_sig_data_segment()
436 data_key = pi_mr->ibmr.lkey; in set_sig_data_segment()
437 data_va = pi_mr->data_iova; in set_sig_data_segment()
438 if (pi_mr->meta_ndescs) { in set_sig_data_segment()
439 prot_len = pi_mr->meta_length; in set_sig_data_segment()
440 prot_key = pi_mr->ibmr.lkey; in set_sig_data_segment()
441 prot_va = pi_mr->pi_iova; in set_sig_data_segment()
451 * ------------------ in set_sig_data_segment()
453 * ------------------ in set_sig_data_segment()
455 * ------------------ in set_sig_data_segment()
459 data_klm->bcount = cpu_to_be32(data_len); in set_sig_data_segment()
460 data_klm->key = cpu_to_be32(data_key); in set_sig_data_segment()
461 data_klm->va = cpu_to_be64(data_va); in set_sig_data_segment()
467 * --------------------------- in set_sig_data_segment()
469 * --------------------------- in set_sig_data_segment()
471 * --------------------------- in set_sig_data_segment()
473 * --------------------------- in set_sig_data_segment()
475 * --------------------------- in set_sig_data_segment()
480 u16 block_size = sig_attrs->mem.sig.dif.pi_interval; in set_sig_data_segment()
487 prot_size = prot_field_size(sig_attrs->mem.sig_type); in set_sig_data_segment()
490 return -EINVAL; in set_sig_data_segment()
492 sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size + in set_sig_data_segment()
494 sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP); in set_sig_data_segment()
495 sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size); in set_sig_data_segment()
496 sblock_ctrl->num_entries = cpu_to_be16(2); in set_sig_data_segment()
498 data_sentry->bcount = cpu_to_be16(block_size); in set_sig_data_segment()
499 data_sentry->key = cpu_to_be32(data_key); in set_sig_data_segment()
500 data_sentry->va = cpu_to_be64(data_va); in set_sig_data_segment()
501 data_sentry->stride = cpu_to_be16(block_size); in set_sig_data_segment()
503 prot_sentry->bcount = cpu_to_be16(prot_size); in set_sig_data_segment()
504 prot_sentry->key = cpu_to_be32(prot_key); in set_sig_data_segment()
505 prot_sentry->va = cpu_to_be64(prot_va); in set_sig_data_segment()
506 prot_sentry->stride = cpu_to_be16(prot_size); in set_sig_data_segment()
514 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_sig_data_segment()
519 return -EINVAL; in set_sig_data_segment()
523 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_sig_data_segment()
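
The stride block control segment above describes an interleaved layout: each cycle covers one data block of pi_interval bytes followed by one protection block, and the cycle repeats data_len / block_size times. A short worked sketch of that arithmetic with illustrative values:

/* Standalone sketch: strided data/protection interleaving, matching the
 * bcount_per_cycle and repeat_count computations above. */
#include <stdio.h>

int main(void)
{
	unsigned int data_len   = 8 * 512;   /* bytes of data            */
	unsigned int block_size = 512;       /* pi_interval              */
	unsigned int prot_size  = 8;         /* protection field per block */

	unsigned int repeat_count     = data_len / block_size;      /* 8   */
	unsigned int bcount_per_cycle = block_size + prot_size;     /* 520 */

	printf("cycles=%u total=%u bytes\n",
	       repeat_count, repeat_count * bcount_per_cycle);
	return 0;
}
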
532 u32 sig_key = sig_mr->rkey; in set_sig_mkey_segment()
533 u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1; in set_sig_mkey_segment()
537 seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS; in set_sig_mkey_segment()
538 seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00); in set_sig_mkey_segment()
539 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 | in set_sig_mkey_segment()
541 seg->len = cpu_to_be64(length); in set_sig_mkey_segment()
542 seg->xlt_oct_size = cpu_to_be32(mlx5r_umr_get_xlt_octo(size)); in set_sig_mkey_segment()
543 seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE); in set_sig_mkey_segment()
551 umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE; in set_sig_umr_segment()
552 umr->xlt_octowords = cpu_to_be16(mlx5r_umr_get_xlt_octo(size)); in set_sig_umr_segment()
553 umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE); in set_sig_umr_segment()
554 umr->mkey_mask = sig_mkey_mask(); in set_sig_umr_segment()
562 struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr); in set_pi_umr_wr()
563 struct mlx5_ib_mr *pi_mr = sig_mr->pi_mr; in set_pi_umr_wr()
564 struct ib_sig_attrs *sig_attrs = sig_mr->ibmr.sig_attrs; in set_pi_umr_wr()
565 u32 pdn = to_mpd(qp->ibqp.pd)->pdn; in set_pi_umr_wr()
569 if (unlikely(send_wr->num_sge != 0) || in set_pi_umr_wr()
570 unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) || in set_pi_umr_wr()
571 unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) || in set_pi_umr_wr()
572 unlikely(!sig_mr->sig->sig_status_checked)) in set_pi_umr_wr()
573 return -EINVAL; in set_pi_umr_wr()
576 region_len = pi_mr->ibmr.length; in set_pi_umr_wr()
579 * KLM octoword size - if protection was provided in set_pi_umr_wr()
583 if (sig_attrs->mem.sig_type != IB_SIG_TYPE_NONE) in set_pi_umr_wr()
591 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_pi_umr_wr()
593 set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len, in set_pi_umr_wr()
597 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_pi_umr_wr()
599 ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size, in set_pi_umr_wr()
604 sig_mr->sig->sig_status_checked = false; in set_pi_umr_wr()
614 psv_seg->psv_num = cpu_to_be32(psv_idx); in set_psv_wr()
615 switch (domain->sig_type) { in set_psv_wr()
619 psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 | in set_psv_wr()
620 domain->sig.dif.app_tag); in set_psv_wr()
621 psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag); in set_psv_wr()
625 domain->sig_type); in set_psv_wr()
626 return -EINVAL; in set_psv_wr()
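
set_psv_wr() seeds the PSV from the domain's initial values; the transient signature word packs the domain's 16-bit bg value in the upper half and the 16-bit application tag in the lower half, as the assignment above shows. A trivial sketch of that packing:

/* Standalone sketch: "bg << 16 | app_tag" packing. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t bg = 0xabcd, app_tag = 0x1234;
	uint32_t transient_sig = ((uint32_t)bg << 16) | app_tag;

	printf("0x%08x\n", transient_sig);   /* prints 0xabcd1234 */
	return 0;
}
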
640 struct mlx5_ib_mr *mr = to_mmr(wr->mr); in set_reg_wr()
641 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); in set_reg_wr()
642 struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device); in set_reg_wr()
643 int mr_list_size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size; in set_reg_wr()
645 bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC; in set_reg_wr()
652 if (!mlx5r_umr_can_reconfig(dev, 0, wr->access)) { in set_reg_wr()
654 to_mdev(qp->ibqp.device), in set_reg_wr()
656 return -EINVAL; in set_reg_wr()
659 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) { in set_reg_wr()
660 mlx5_ib_warn(to_mdev(qp->ibqp.device), in set_reg_wr()
662 return -EINVAL; in set_reg_wr()
673 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_reg_wr()
675 set_reg_mkey_seg(*seg, mr, wr->key, wr->access); in set_reg_wr()
678 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_reg_wr()
681 mlx5r_memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs, in set_reg_wr()
683 *size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4); in set_reg_wr()
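
MLX5_SEND_WQE_BB is the 64-byte basic block and *size counts 16-byte units, so the final ALIGN above rounds the registration WQE up to a whole basic block, i.e. a multiple of four units. A worked sketch assuming those sizes:

/* Standalone sketch: round a WQE size (in 16-byte units) up to a whole
 * 64-byte basic block, as done at the end of set_reg_wr(). */
#include <stdio.h>

#define TOY_SEND_WQE_BB 64                  /* basic block, bytes */
#define TOY_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int size = 7;                            /* 7 * 16 = 112 bytes */
	size = TOY_ALIGN(size, TOY_SEND_WQE_BB >> 4);     /* align to 4 units */
	printf("%u units = %u bytes\n", size, size * 16); /* 8 units = 128 bytes */
	return 0;
}
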
698 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_linv_wr()
702 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_linv_wr()
711 for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) { in dump_wqe()
713 p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx); in dump_wqe()
716 idx = (idx + 1) & (qp->sq.wqe_cnt - 1); in dump_wqe()
729 if (unlikely(mlx5r_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) in mlx5r_begin_wqe()
730 return -ENOMEM; in mlx5r_begin_wqe()
732 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); in mlx5r_begin_wqe()
733 *seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx); in mlx5r_begin_wqe()
736 (*ctrl)->general_id = general_id; in mlx5r_begin_wqe()
737 (*ctrl)->fm_ce_se = qp->sq_signal_bits | in mlx5r_begin_wqe()
743 *cur_edge = qp->sq.cur_edge; in mlx5r_begin_wqe()
754 send_ieth(wr), wr->send_flags & IB_SEND_SIGNALED, in begin_wqe()
755 wr->send_flags & IB_SEND_SOLICITED); in begin_wqe()
764 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | in mlx5r_finish_wqe()
766 ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8)); in mlx5r_finish_wqe()
767 ctrl->fm_ce_se |= fence; in mlx5r_finish_wqe()
768 if (unlikely(qp->flags_en & MLX5_QP_FLAG_SIGNATURE)) in mlx5r_finish_wqe()
769 ctrl->signature = wq_sig(ctrl); in mlx5r_finish_wqe()
771 qp->sq.wrid[idx] = wr_id; in mlx5r_finish_wqe()
772 qp->sq.w_list[idx].opcode = mlx5_opcode; in mlx5r_finish_wqe()
773 qp->sq.wqe_head[idx] = qp->sq.head + nreq; in mlx5r_finish_wqe()
774 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); in mlx5r_finish_wqe()
775 qp->sq.w_list[idx].next = qp->sq.cur_post; in mlx5r_finish_wqe()
781 qp->sq.cur_edge = (unlikely(seg == cur_edge)) ? in mlx5r_finish_wqe()
782 get_sq_edge(&qp->sq, qp->sq.cur_post & in mlx5r_finish_wqe()
783 (qp->sq.wqe_cnt - 1)) : in mlx5r_finish_wqe()
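
mlx5r_finish_wqe() advances cur_post by the number of 64-byte basic blocks the WQE occupies, and the slot of the next WQE is cur_post masked with wqe_cnt - 1, which works because wqe_cnt is a power of two (mlx5r_begin_wqe() above uses the same mask). A sketch of that bookkeeping with hypothetical values:

/* Standalone sketch: advance the producer index by whole basic blocks. */
#include <stdint.h>
#include <stdio.h>

#define TOY_SEND_WQE_BB 64
#define TOY_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t wqe_cnt = 256;          /* ring size, power of two    */
	uint32_t cur_post = 254;         /* free-running producer index */
	unsigned int size = 7;           /* WQE size in 16-byte units  */

	cur_post += TOY_DIV_ROUND_UP(size * 16, TOY_SEND_WQE_BB); /* += 2 BBs */

	/* Slot of the next WQE in the ring buffer. */
	printf("next idx = %u\n", cur_post & (wqe_cnt - 1));       /* 0 */
	return 0;
}
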
789 set_raddr_seg(*seg, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey); in handle_rdma_op()
798 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; in handle_local_inv()
799 (*ctrl)->imm = cpu_to_be32(wr->ex.invalidate_rkey); in handle_local_inv()
807 qp->sq.wr_data[idx] = IB_WR_REG_MR; in handle_reg_mr()
808 (*ctrl)->imm = cpu_to_be32(reg_wr(wr)->key); in handle_reg_mr()
828 err = -ENOMEM; in handle_psv()
836 mlx5r_finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, in handle_psv()
858 qp->sq.wr_data[*idx] = IB_WR_REG_MR_INTEGRITY; in handle_reg_mr_integrity()
860 mr = to_mmr(reg_wr(wr)->mr); in handle_reg_mr_integrity()
861 pi_mr = mr->pi_mr; in handle_reg_mr_integrity()
867 reg_pi_wr.mr = &pi_mr->ibmr; in handle_reg_mr_integrity()
868 reg_pi_wr.access = reg_wr(wr)->access; in handle_reg_mr_integrity()
869 reg_pi_wr.key = pi_mr->ibmr.rkey; in handle_reg_mr_integrity()
871 (*ctrl)->imm = cpu_to_be32(reg_pi_wr.key); in handle_reg_mr_integrity()
878 wr->wr_id, nreq, fence, MLX5_OPCODE_UMR); in handle_reg_mr_integrity()
883 err = -ENOMEM; in handle_reg_mr_integrity()
889 pa_pi_mr.ibmr.lkey = mr->ibmr.pd->local_dma_lkey; in handle_reg_mr_integrity()
890 pa_pi_mr.mmkey.ndescs = mr->mmkey.ndescs; in handle_reg_mr_integrity()
891 pa_pi_mr.data_length = mr->data_length; in handle_reg_mr_integrity()
892 pa_pi_mr.data_iova = mr->data_iova; in handle_reg_mr_integrity()
893 if (mr->meta_ndescs) { in handle_reg_mr_integrity()
894 pa_pi_mr.meta_ndescs = mr->meta_ndescs; in handle_reg_mr_integrity()
895 pa_pi_mr.meta_length = mr->meta_length; in handle_reg_mr_integrity()
896 pa_pi_mr.pi_iova = mr->pi_iova; in handle_reg_mr_integrity()
899 pa_pi_mr.ibmr.length = mr->ibmr.length; in handle_reg_mr_integrity()
900 mr->pi_mr = &pa_pi_mr; in handle_reg_mr_integrity()
902 (*ctrl)->imm = cpu_to_be32(mr->ibmr.rkey); in handle_reg_mr_integrity()
909 mlx5r_finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, in handle_reg_mr_integrity()
912 sig_attrs = mr->ibmr.sig_attrs; in handle_reg_mr_integrity()
914 &sig_attrs->mem, mr->sig->psv_memory.psv_idx, in handle_reg_mr_integrity()
920 &sig_attrs->wire, mr->sig->psv_wire.psv_idx, in handle_reg_mr_integrity()
925 qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; in handle_reg_mr_integrity()
939 switch (wr->opcode) { in handle_qpt_rc()
950 err = -EOPNOTSUPP; in handle_qpt_rc()
984 switch (wr->opcode) { in handle_qpt_uc()
1001 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in handle_qpt_hw_gsi()
1010 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in handle_qpt_ud()
1013 if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) { in handle_qpt_ud()
1021 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in handle_qpt_ud()
1028 struct mlx5_bf *bf = &qp->bf; in mlx5r_ring_db()
1030 qp->sq.head += nreq; in mlx5r_ring_db()
1037 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post); in mlx5r_ring_db()
1044 mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset); in mlx5r_ring_db()
1048 bf->offset ^= bf->buf_size; in mlx5r_ring_db()
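
After the doorbell write, bf->offset is XOR-toggled by buf_size so successive posts alternate between the two halves of the BlueFlame buffer. A trivial sketch of the toggle:

/* Standalone sketch: ping-pong between two buffer halves by XOR-toggling
 * the offset, as mlx5r_ring_db() does. */
#include <stdio.h>

int main(void)
{
	unsigned int buf_size = 256;     /* one buffer half, bytes */
	unsigned int offset = 0;

	for (int i = 0; i < 4; i++) {
		printf("post %d writes at offset %u\n", i, offset);
		offset ^= buf_size;      /* alternates 0, 256, 0, 256, ... */
	}
	return 0;
}
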
1055 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); in mlx5_ib_post_send()
1056 struct mlx5_core_dev *mdev = dev->mdev; in mlx5_ib_post_send()
1071 if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && in mlx5_ib_post_send()
1074 return -EIO; in mlx5_ib_post_send()
1077 if (qp->type == IB_QPT_GSI) in mlx5_ib_post_send()
1080 spin_lock_irqsave(&qp->sq.lock, flags); in mlx5_ib_post_send()
1082 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_send()
1083 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) { in mlx5_ib_post_send()
1085 err = -EINVAL; in mlx5_ib_post_send()
1090 num_sge = wr->num_sge; in mlx5_ib_post_send()
1091 if (unlikely(num_sge > qp->sq.max_gs)) { in mlx5_ib_post_send()
1093 err = -EINVAL; in mlx5_ib_post_send()
1102 err = -ENOMEM; in mlx5_ib_post_send()
1107 if (wr->opcode == IB_WR_REG_MR || in mlx5_ib_post_send()
1108 wr->opcode == IB_WR_REG_MR_INTEGRITY) { in mlx5_ib_post_send()
1109 fence = dev->umr_fence; in mlx5_ib_post_send()
1112 if (wr->send_flags & IB_SEND_FENCE) { in mlx5_ib_post_send()
1113 if (qp->next_fence) in mlx5_ib_post_send()
1118 fence = qp->next_fence; in mlx5_ib_post_send()
1122 switch (qp->type) { in mlx5_ib_post_send()
1135 } else if (wr->opcode == IB_WR_REG_MR_INTEGRITY) { in mlx5_ib_post_send()
1144 if (unlikely(!dev->port_caps[qp->port - 1].has_smi)) { in mlx5_ib_post_send()
1146 err = -EPERM; in mlx5_ib_post_send()
1162 if (wr->send_flags & IB_SEND_INLINE && num_sge) { in mlx5_ib_post_send()
1171 handle_post_send_edge(&qp->sq, &seg, size, in mlx5_ib_post_send()
1173 if (unlikely(!wr->sg_list[i].length)) in mlx5_ib_post_send()
1178 wr->sg_list + i); in mlx5_ib_post_send()
1184 qp->next_fence = next_fence; in mlx5_ib_post_send()
1185 mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, in mlx5_ib_post_send()
1186 nreq, fence, mlx5_ib_opcode[wr->opcode]); in mlx5_ib_post_send()
1196 spin_unlock_irqrestore(&qp->sq.lock, flags); in mlx5_ib_post_send()
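
mlx5_ib_post_send() rejects any work request whose opcode falls outside the mlx5_ib_opcode[] table before later translating it to a device opcode when finishing the WQE. A hedged sketch of that bounds-checked table lookup; the toy table and opcode values are invented, not the driver's table.

/* Standalone sketch: bounds-checked opcode translation table. */
#include <stdio.h>

#define TOY_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical verbs-opcode -> device-opcode table. */
static const unsigned int toy_opcode[] = {
	[0] = 0x08,
	[1] = 0x0a,
	[2] = 0x10,
};

static int toy_translate(unsigned int ib_op, unsigned int *hw_op)
{
	if (ib_op >= TOY_ARRAY_SIZE(toy_opcode))
		return -1;            /* unknown opcode, reject the WR */
	*hw_op = toy_opcode[ib_op];
	return 0;
}

int main(void)
{
	unsigned int hw;

	if (toy_translate(1, &hw) == 0)
		printf("device opcode 0x%02x\n", hw);
	if (toy_translate(7, &hw) != 0)
		printf("opcode 7 rejected\n");
	return 0;
}
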
1203 sig->signature = calc_sig(sig, (max_gs + 1) << 2); in set_sig_seg()
1212 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); in mlx5_ib_post_recv()
1213 struct mlx5_core_dev *mdev = dev->mdev; in mlx5_ib_post_recv()
1220 if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && in mlx5_ib_post_recv()
1223 return -EIO; in mlx5_ib_post_recv()
1226 if (qp->type == IB_QPT_GSI) in mlx5_ib_post_recv()
1229 spin_lock_irqsave(&qp->rq.lock, flags); in mlx5_ib_post_recv()
1231 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); in mlx5_ib_post_recv()
1233 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_recv()
1234 if (mlx5r_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { in mlx5_ib_post_recv()
1235 err = -ENOMEM; in mlx5_ib_post_recv()
1240 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mlx5_ib_post_recv()
1241 err = -EINVAL; in mlx5_ib_post_recv()
1246 scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind); in mlx5_ib_post_recv()
1247 if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) in mlx5_ib_post_recv()
1250 for (i = 0; i < wr->num_sge; i++) in mlx5_ib_post_recv()
1251 set_data_ptr_seg(scat + i, wr->sg_list + i); in mlx5_ib_post_recv()
1253 if (i < qp->rq.max_gs) { in mlx5_ib_post_recv()
1255 scat[i].lkey = dev->mkeys.terminate_scatter_list_mkey; in mlx5_ib_post_recv()
1259 if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) { in mlx5_ib_post_recv()
1261 set_sig_seg(sig, qp->rq.max_gs); in mlx5_ib_post_recv()
1264 qp->rq.wrid[ind] = wr->wr_id; in mlx5_ib_post_recv()
1266 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); in mlx5_ib_post_recv()
1271 qp->rq.head += nreq; in mlx5_ib_post_recv()
1278 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); in mlx5_ib_post_recv()
1281 spin_unlock_irqrestore(&qp->rq.lock, flags); in mlx5_ib_post_recv()
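
The receive path mirrors the send path's ring bookkeeping: the slot index is head & (wqe_cnt - 1), head advances by the number of posted WRs, and only the low 16 bits of head are written to the doorbell record. A final sketch of that arithmetic with hypothetical values:

/* Standalone sketch: receive-queue index and doorbell-record arithmetic. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wqe_cnt = 128;            /* ring size, power of two */
	uint32_t head = 0x1fffe;           /* free-running counter    */
	unsigned int nreq = 3;             /* WRs posted this call    */

	uint32_t ind = head & (wqe_cnt - 1);   /* first slot used */
	head += nreq;

	printf("first slot %u, db record 0x%04x\n", ind, head & 0xffff);
	return 0;
}
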