Lines matching full:mr (identifier search hits from the mlx5 UMR code, grouped below by containing function; the leading number on each hit is its line in the source file)

in mlx5r_umr_revoke_mr():
  383   * mlx5r_umr_revoke_mr - Fence all DMA on the MR
  384   * @mr: The MR to fence
  386   * Upon return the NIC will not be doing any DMA to the pages under the MR,
  390  int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr)
  392          struct mlx5_ib_dev *dev = mr_to_mdev(mr);
  406                   mlx5_mkey_variant(mr->mmkey.key));
  408          return mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
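
A minimal caller-side sketch of the pattern the comment at lines 383-386 describes: revoke (fence) the mkey before the pages behind it are released. Only mlx5r_umr_revoke_mr() and struct mlx5_ib_mr come from the listing; example_quiesce_mr() and the surrounding flow are illustrative assumptions and would only compile inside the driver, where its internal headers provide the types.

static int example_quiesce_mr(struct mlx5_ib_mr *mr)
{
	int err;

	/* Fence all NIC DMA against the MR before its pages are touched. */
	err = mlx5r_umr_revoke_mr(mr);
	if (err)
		return err;	/* do not release pages if the fence failed */

	/* ... now it is safe to unmap/free the pages backing the MR ... */
	return 0;
}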

in mlx5r_umr_rereg_pd_access():
  429  int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
  432          struct mlx5_ib_dev *dev = mr_to_mdev(mr);
  445                   mlx5_mkey_variant(mr->mmkey.key));
  447          err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
  451          mr->access_flags = access_flags;
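
A hedged sketch of re-targeting an existing MR to another PD with new access rights through this helper. The signature is truncated at line 429; the remaining argument is assumed to be the new access_flags value, as the assignment at line 451 suggests. The access-flag constants are standard rdma/ib_verbs.h values, not taken from this listing, and example_change_pd() is illustrative only.

static int example_change_pd(struct mlx5_ib_mr *mr, struct ib_pd *new_pd)
{
	int new_access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	/* Updates the existing mkey via a UMR work request; no new mkey. */
	return mlx5r_umr_rereg_pd_access(mr, new_pd, new_access);
}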

in mlx5r_umr_set_update_xlt_mkey_seg():
  574                                    struct mlx5_ib_mr *mr,
  577          mlx5r_umr_set_access_flags(dev, mkey_seg, mr->access_flags);
  578          MLX5_SET(mkc, mkey_seg, pd, to_mpd(mr->ibmr.pd)->pdn);
  579          MLX5_SET64(mkc, mkey_seg, start_addr, mr->ibmr.iova);
  580          MLX5_SET64(mkc, mkey_seg, len, mr->ibmr.length);
  583          MLX5_SET(mkc, mkey_seg, mkey_7_0, mlx5_mkey_variant(mr->mmkey.key));
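
For readers unfamiliar with the field setters above: MLX5_SET()/MLX5_SET64() (from linux/mlx5/device.h) write a named field of a firmware-defined layout, here "mkc" (mkey context), into a raw segment buffer, handling the bit offsets and device byte order. The field names pd, start_addr, len and mkey_7_0 all appear in the listing; the helper below is purely illustrative and assumes the driver's kernel context.

static void example_fill_mkc(void *mkey_seg, u32 pdn, u64 iova, u64 length,
			     u32 mkey)
{
	MLX5_SET(mkc, mkey_seg, pd, pdn);               /* protection domain    */
	MLX5_SET64(mkc, mkey_seg, start_addr, iova);    /* MR base virtual addr */
	MLX5_SET64(mkc, mkey_seg, len, length);         /* MR length in bytes   */
	MLX5_SET(mkc, mkey_seg, mkey_7_0,
		 mlx5_mkey_variant(mkey));              /* low byte of the key  */
}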

in mlx5r_umr_final_update_xlt():
  607                                  struct mlx5_ib_mr *mr, struct ib_sge *sg,
  629          if (!mr->ibmr.length)

in _mlx5r_umr_update_mr_pas():
  639  _mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd)
  642          struct mlx5_ib_dev *dev = mr_to_mdev(mr);
  657                   ib_umem_num_dma_blocks(mr->umem, 1 << mr->page_shift),
  664          mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe.mkey_seg, mr,
  665                                            mr->page_shift);
  677          rdma_umem_for_each_dma_block(mr->umem, &biter, BIT(mr->page_shift)) {
  682                  err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe,
  705          if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP))
  715          mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
  718          err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, true);
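
The hits above show the overall shape of _mlx5r_umr_update_mr_pas(): walk the umem's DMA blocks (line 677), pack translation entries into a staging buffer, post a UMR work request whenever the buffer fills (line 682), and finish with one last post (line 718). Below is a self-contained, plain-C sketch of that chunk-and-flush control flow; it is an illustration of the pattern, not driver code, and every name in it is made up.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRIES_PER_POST 4	/* stands in for the real staging capacity */

static void example_post(const uint64_t *entries, size_t n, bool last)
{
	printf("post %zu entries%s\n", n, last ? " (final)" : "");
}

static void example_update_pas(const uint64_t *dma_blocks, size_t nblocks)
{
	uint64_t staging[ENTRIES_PER_POST];
	size_t used = 0;

	for (size_t i = 0; i < nblocks; i++) {
		staging[used++] = dma_blocks[i];
		if (used == ENTRIES_PER_POST) {
			/* staging buffer full: flush an intermediate post */
			example_post(staging, used, false);
			used = 0;
		}
	}
	/* final flush, analogous to the trailing post at line 718 */
	example_post(staging, used, true);
}

int main(void)
{
	uint64_t blocks[10] = { 0 };

	example_update_pas(blocks, 10);
	return 0;
}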

in mlx5r_umr_update_data_direct_ksm_pas():
  726  int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr, unsigned int flags)
  729          if (WARN_ON(!mr->umem->is_dmabuf) || (flags & MLX5_IB_UPD_XLT_ZAP))
  732          return _mlx5r_umr_update_mr_pas(mr, flags, true);

in mlx5r_umr_update_mr_pas():
  736   * Send the DMA list to the HW for a normal MR using UMR.
  737   * Dmabuf MR is handled in a similar way, except that the MLX5_IB_UPD_XLT_ZAP
  740  int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
  742          if (WARN_ON(mr->umem->is_odp))
  745          return _mlx5r_umr_update_mr_pas(mr, flags, false);
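
A hedged caller-side sketch that mirrors only the guard visible at line 742: ODP MRs are rejected here and go through mlx5r_umr_update_xlt() instead (below). How the driver actually chooses between this entry point and the data-direct KSM variant at line 726 depends on state not shown in this listing, so the helper deliberately does not guess at that dispatch; example_update_pas_checked() is an illustrative name.

static int example_update_pas_checked(struct mlx5_ib_mr *mr, unsigned int flags)
{
	/* mlx5r_umr_update_mr_pas() WARNs on ODP MRs (line 742) */
	if (mr->umem->is_odp)
		return -EOPNOTSUPP;

	return mlx5r_umr_update_mr_pas(mr, flags);
}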

in mlx5r_umr_update_xlt():
  753  int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
  760          struct mlx5_ib_dev *dev = mr_to_mdev(mr);
  777          if (WARN_ON(!mr->umem->is_odp))
  797          struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
  804          mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe.mkey_seg, mr, page_shift);
  814          mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
  820          mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
  822          err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, true);
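
A hedged sketch of calling into the ODP path above, for example to zap the translation entries for a page range. The signature is truncated at line 753; the trailing page_shift and flags parameters are assumed from their use at lines 804 and 814. MLX5_IB_UPD_XLT_ZAP appears in the listing (line 705); MLX5_IB_UPD_XLT_ATOMIC is an assumed companion flag, not shown here, and example_zap_odp_range() is illustrative only.

static int example_zap_odp_range(struct mlx5_ib_mr *mr, u64 start_idx,
				 int npages, int page_shift)
{
	/* clear the entries covering [start_idx, start_idx + npages) */
	return mlx5r_umr_update_xlt(mr, start_idx, npages, page_shift,
				    MLX5_IB_UPD_XLT_ZAP |
				    MLX5_IB_UPD_XLT_ATOMIC);
}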