Lines Matching full:mr in drivers/infiniband/hw/mlx5/odp.c
174 struct mlx5_ib_mr *mr, int flags) in populate_mtt() argument
176 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in populate_mtt()
190 struct mlx5_ib_mr *mr, int flags) in mlx5_odp_populate_xlt() argument
193 populate_klm(xlt, idx, nentries, mr, flags); in mlx5_odp_populate_xlt()
195 populate_mtt(xlt, idx, nentries, mr, flags); in mlx5_odp_populate_xlt()
200 * This must be called after the mr has been removed from implicit_children.
201 * NOTE: The MR does not necessarily have to be
207 struct mlx5_ib_mr *mr = in free_implicit_child_mr_work() local
209 struct mlx5_ib_mr *imr = mr->parent; in free_implicit_child_mr_work()
211 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in free_implicit_child_mr_work()
213 mlx5r_deref_wait_odp_mkey(&mr->mmkey); in free_implicit_child_mr_work()
216 mlx5r_umr_update_xlt(mr->parent, in free_implicit_child_mr_work()
220 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in free_implicit_child_mr_work()
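The comment above ties free_implicit_child_mr_work() to a strict ordering: the MR is first removed from implicit_children, then mlx5r_deref_wait_odp_mkey() waits out the remaining mkey references before mlx5_ib_dereg_mr() runs. A minimal sketch of that unpublish-wait-free sequence, assuming nothing beyond the stock refcount and completion APIs; all demo_* names are invented:

#include <linux/refcount.h>
#include <linux/completion.h>

struct demo_mkey {
        refcount_t usecount;            /* held by lookups and page faults */
        struct completion released;     /* init_completion()'d at creation */
};

static void demo_mkey_put(struct demo_mkey *key)
{
        /* Last reference gone: wake the thread waiting to destroy the key. */
        if (refcount_dec_and_test(&key->usecount))
                complete(&key->released);
}

static void demo_mkey_destroy(struct demo_mkey *key)
{
        /*
         * The caller has already unpublished @key from every lookup
         * structure, so no new references can appear.  Drop the creation
         * reference and wait for any in-flight users to finish.
         */
        demo_mkey_put(key);
        wait_for_completion(&key->released);
        /* Now it is safe to free the key's resources, sleeping if needed. */
}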
225 static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr) in destroy_unused_implicit_child_mr() argument
227 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in destroy_unused_implicit_child_mr()
229 struct mlx5_ib_mr *imr = mr->parent; in destroy_unused_implicit_child_mr()
235 if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault)) in destroy_unused_implicit_child_mr()
236 xa_erase(&mr_to_mdev(mr)->odp_mkeys, in destroy_unused_implicit_child_mr()
237 mlx5_base_mkey(mr->mmkey.key)); in destroy_unused_implicit_child_mr()
239 /* Freeing a MR is a sleeping operation, so bounce to a work queue */ in destroy_unused_implicit_child_mr()
240 INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work); in destroy_unused_implicit_child_mr()
241 queue_work(system_unbound_wq, &mr->odp_destroy.work); in destroy_unused_implicit_child_mr()
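destroy_unused_implicit_child_mr() ends by bouncing the actual teardown to system_unbound_wq because, as the comment notes, freeing an MR is a sleeping operation. A minimal sketch of that INIT_WORK()/queue_work() deferral, with invented demo_* names standing in for the driver's structures:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_obj {
        struct work_struct destroy_work;
        void *payload;
};

static void demo_destroy_work(struct work_struct *work)
{
        /* Recover the enclosing object from its embedded work_struct. */
        struct demo_obj *obj = container_of(work, struct demo_obj,
                                            destroy_work);

        /* Sleeping teardown (reference waits, firmware commands) is fine here. */
        kfree(obj->payload);
        kfree(obj);
}

static void demo_schedule_destroy(struct demo_obj *obj)
{
        /* Called from a context that must not sleep: punt to a workqueue. */
        INIT_WORK(&obj->destroy_work, demo_destroy_work);
        queue_work(system_unbound_wq, &obj->destroy_work);
}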
250 struct mlx5_ib_mr *mr; in mlx5_ib_invalidate_range() local
270 mr = umem_odp->private; in mlx5_ib_invalidate_range()
302 mlx5r_umr_update_xlt(mr, blk_start_idx, in mlx5_ib_invalidate_range()
311 mlx5r_umr_update_xlt(mr, blk_start_idx, in mlx5_ib_invalidate_range()
316 mlx5_update_odp_stats(mr, invalidations, invalidations); in mlx5_ib_invalidate_range()
326 if (unlikely(!umem_odp->npages && mr->parent)) in mlx5_ib_invalidate_range()
327 destroy_unused_implicit_child_mr(mr); in mlx5_ib_invalidate_range()
444 struct mlx5_ib_mr *mr; in implicit_get_child_mr() local
454 mr = mlx5_mr_cache_alloc(dev, imr->access_flags, in implicit_get_child_mr()
457 if (IS_ERR(mr)) { in implicit_get_child_mr()
459 return mr; in implicit_get_child_mr()
462 mr->access_flags = imr->access_flags; in implicit_get_child_mr()
463 mr->ibmr.pd = imr->ibmr.pd; in implicit_get_child_mr()
464 mr->ibmr.device = &mr_to_mdev(imr)->ib_dev; in implicit_get_child_mr()
465 mr->umem = &odp->umem; in implicit_get_child_mr()
466 mr->ibmr.lkey = mr->mmkey.key; in implicit_get_child_mr()
467 mr->ibmr.rkey = mr->mmkey.key; in implicit_get_child_mr()
468 mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE; in implicit_get_child_mr()
469 mr->parent = imr; in implicit_get_child_mr()
470 odp->private = mr; in implicit_get_child_mr()
476 refcount_set(&mr->mmkey.usecount, 2); in implicit_get_child_mr()
478 err = mlx5r_umr_update_xlt(mr, 0, in implicit_get_child_mr()
489 ret = __xa_cmpxchg(&imr->implicit_children, idx, NULL, mr, in implicit_get_child_mr()
497 * Another thread beat us to creating the child mr, use in implicit_get_child_mr()
506 ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key), in implicit_get_child_mr()
507 &mr->mmkey, GFP_KERNEL); in implicit_get_child_mr()
513 mr->mmkey.type = MLX5_MKEY_IMPLICIT_CHILD; in implicit_get_child_mr()
515 mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr); in implicit_get_child_mr()
516 return mr; in implicit_get_child_mr()
521 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in implicit_get_child_mr()
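implicit_get_child_mr() publishes the new child with __xa_cmpxchg() and falls back to the existing entry when "another thread beat us to creating the child mr". A minimal sketch of that insert-unless-present idiom, using the self-locking xa_cmpxchg() variant rather than __xa_cmpxchg() under xa_lock; the demo_* names are invented:

#include <linux/xarray.h>
#include <linux/err.h>

struct demo_child;      /* opaque for this sketch */

static struct demo_child *demo_get_or_insert(struct xarray *children,
                                             unsigned long idx,
                                             struct demo_child *new)
{
        struct demo_child *old;

        /* Store @new at @idx only if the slot is still empty. */
        old = xa_cmpxchg(children, idx, NULL, new, GFP_KERNEL);
        if (xa_is_err(old))
                return ERR_PTR(xa_err(old));    /* e.g. allocation failure */
        if (old)
                return old;     /* another thread won the race: reuse its entry */
        return new;             /* @new is now published in the tree */
}

In the driver, the loser of the race also has to unwind its half-built child, which appears to be why the failure path of implicit_get_child_mr() ends in mlx5_ib_dereg_mr() above.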
527 * and each implicit MR needs to assign a private null mkey to get the page
631 mlx5_ib_dbg(dev, "key %x mr %p\n", imr->mmkey.key, imr); in mlx5_ib_alloc_implicit_mr()
639 void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr) in mlx5_ib_free_odp_mr() argument
645 * If this is an implicit MR it is already invalidated so we can just in mlx5_ib_free_odp_mr()
648 xa_for_each(&mr->implicit_children, idx, mtt) { in mlx5_ib_free_odp_mr()
649 xa_erase(&mr->implicit_children, idx); in mlx5_ib_free_odp_mr()
653 if (mr->null_mmkey.key) { in mlx5_ib_free_odp_mr()
654 xa_erase(&mr_to_mdev(mr)->odp_mkeys, in mlx5_ib_free_odp_mr()
655 mlx5_base_mkey(mr->null_mmkey.key)); in mlx5_ib_free_odp_mr()
657 mlx5_core_destroy_mkey(mr_to_mdev(mr)->mdev, in mlx5_ib_free_odp_mr()
658 mr->null_mmkey.key); in mlx5_ib_free_odp_mr()
665 static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp, in pagefault_real_mr() argument
691 * No need to check whether the MTTs really belong to this MR, since in pagefault_real_mr()
694 ret = mlx5r_umr_update_xlt(mr, start_idx, np, page_shift, xlt_flags); in pagefault_real_mr()
699 mlx5_ib_err(mr_to_mdev(mr), in pagefault_real_mr()
732 /* Fault each child mr that intersects with our interval. */ in pagefault_implicit_mr()
785 * see a MR that is not yet visible in the KSM. This is similar to a in pagefault_implicit_mr()
786 * parallel page fault seeing a MR that is being concurrently removed in pagefault_implicit_mr()
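"Fault each child mr that intersects with our interval" describes a walk over fixed-size child regions, clamping the faulted length to each child's boundary. A sketch of that walk; DEMO_CHILD_SHIFT stands in for the driver's child size (MLX5_IMR_MTT_SIZE) and fault_child() is an invented callback:

#include <linux/kernel.h>
#include <linux/minmax.h>

#define DEMO_CHILD_SHIFT        30
#define DEMO_CHILD_SIZE         (1ULL << DEMO_CHILD_SHIFT)

/* Assumes bcnt > 0. */
static int demo_fault_interval(u64 io_virt, size_t bcnt,
                               int (*fault_child)(u64 idx, u64 offset,
                                                  size_t len))
{
        u64 end_idx = (io_virt + bcnt - 1) >> DEMO_CHILD_SHIFT;
        u64 idx;
        int mapped = 0;

        for (idx = io_virt >> DEMO_CHILD_SHIFT; idx <= end_idx; idx++) {
                /* Offset inside this child; length clamped to its boundary. */
                u64 offset = io_virt & (DEMO_CHILD_SIZE - 1);
                size_t len = min_t(size_t, bcnt, DEMO_CHILD_SIZE - offset);
                int ret = fault_child(idx, offset, len);

                if (ret < 0)
                        return ret;
                mapped += ret;
                io_virt += len;
                bcnt -= len;
        }
        return mapped;
}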
803 static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt, in pagefault_dmabuf_mr() argument
806 struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem); in pagefault_dmabuf_mr()
826 if (mr->data_direct) in pagefault_dmabuf_mr()
827 err = mlx5r_umr_update_data_direct_ksm_pas(mr, xlt_flags); in pagefault_dmabuf_mr()
829 err = mlx5r_umr_update_mr_pas(mr, xlt_flags); in pagefault_dmabuf_mr()
839 return ib_umem_num_pages(mr->umem); in pagefault_dmabuf_mr()
844 * -EFAULT: The io_virt->bcnt is not within the MR, it covers pages that are
845 * not accessible, or the MR is no longer valid.
851 static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt, in pagefault_mr() argument
854 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in pagefault_mr()
856 if (unlikely(io_virt < mr->ibmr.iova) && !permissive_fault) in pagefault_mr()
859 if (mr->umem->is_dmabuf) in pagefault_mr()
860 return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags); in pagefault_mr()
863 u64 offset = io_virt < mr->ibmr.iova ? 0 : io_virt - mr->ibmr.iova; in pagefault_mr()
878 return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped, in pagefault_mr()
881 return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped, in pagefault_mr()
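The comment block above documents pagefault_mr()'s contract: -EFAULT when the range is not within the MR, covers inaccessible pages, or the MR is no longer valid; other negative values are ordinary errors, and the dmabuf path's return of ib_umem_num_pages() suggests a non-negative result counts mapped pages. A hedged caller-side sketch of that contract, with the fault routine passed in as an invented callback rather than the real static helper:

#include <linux/errno.h>
#include <linux/types.h>

static int demo_handle_range(int (*demo_fault)(u64 io_virt, size_t bcnt),
                             u64 io_virt, size_t bcnt, u64 *pages_mapped)
{
        int npages = demo_fault(io_virt, bcnt);

        if (npages == -EFAULT)
                return npages;  /* range outside the MR, or MR no longer valid */
        if (npages < 0)
                return npages;  /* other errors propagate unchanged */

        *pages_mapped += npages;        /* >= 0: pages made resident */
        return 0;
}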
885 int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr) in mlx5_ib_init_odp_mr() argument
889 ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem), mr->umem->address, in mlx5_ib_init_odp_mr()
890 mr->umem->length, NULL, in mlx5_ib_init_odp_mr()
895 int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr) in mlx5_ib_init_dmabuf_mr() argument
899 ret = pagefault_dmabuf_mr(mr, mr->umem->length, NULL, in mlx5_ib_init_dmabuf_mr()
964 struct mlx5_ib_mr *mr; in pagefault_single_data_segment() local
978 "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n", in pagefault_single_data_segment()
994 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); in pagefault_single_data_segment()
996 ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false); in pagefault_single_data_segment()
1000 mlx5_update_odp_stats(mr, faults, ret); in pagefault_single_data_segment()
1502 struct mlx5_ib_mr *mr, *child_mr; in mlx5_ib_mr_memory_pfault_handler() local
1512 mr = child_mr->parent; in mlx5_ib_mr_memory_pfault_handler()
1515 mr = container_of(mmkey, struct mlx5_ib_mr, null_mmkey); in mlx5_ib_mr_memory_pfault_handler()
1518 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); in mlx5_ib_mr_memory_pfault_handler()
1523 ret = pagefault_mr(mr, prefetch_va, prefetch_size, NULL, 0, true); in mlx5_ib_mr_memory_pfault_handler()
1525 ret = pagefault_mr(mr, pfault->memory.va, in mlx5_ib_mr_memory_pfault_handler()
1532 mlx5_update_odp_stats(mr, faults, ret); in mlx5_ib_mr_memory_pfault_handler()
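mlx5_ib_mr_memory_pfault_handler() recovers the same struct mlx5_ib_mr through container_of() on two different embedded members, mmkey or null_mmkey, depending on which kind of key faulted. A small sketch of that dispatch with invented types:

#include <linux/kernel.h>

enum demo_key_type { DEMO_KEY_NORMAL, DEMO_KEY_NULL };

struct demo_key { enum demo_key_type type; };

struct demo_mr {
        struct demo_key mmkey;          /* normal lookup entry */
        struct demo_key null_mmkey;     /* alternate lookup entry */
};

static struct demo_mr *demo_key_to_mr(struct demo_key *key)
{
        /* Same enclosing object, different embedded member per key type. */
        if (key->type == DEMO_KEY_NULL)
                return container_of(key, struct demo_mr, null_mmkey);
        return container_of(key, struct demo_mr, mmkey);
}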
1891 struct mlx5_ib_mr *mr; member
1901 mlx5r_deref_odp_mkey(&work->frags[i].mr->mmkey); in destroy_prefetch_work()
1911 struct mlx5_ib_mr *mr = NULL; in get_prefetchable_mr() local
1917 mr = ERR_PTR(-ENOENT); in get_prefetchable_mr()
1921 mr = ERR_PTR(-EINVAL); in get_prefetchable_mr()
1925 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); in get_prefetchable_mr()
1927 if (mr->ibmr.pd != pd) { in get_prefetchable_mr()
1928 mr = ERR_PTR(-EPERM); in get_prefetchable_mr()
1932 /* prefetch with write-access must be supported by the MR */ in get_prefetchable_mr()
1934 !mr->umem->writable) { in get_prefetchable_mr()
1935 mr = ERR_PTR(-EPERM); in get_prefetchable_mr()
1942 return mr; in get_prefetchable_mr()
1956 ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt, in mlx5_ib_prefetch_mr_work()
1961 mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret); in mlx5_ib_prefetch_mr_work()
1978 struct mlx5_ib_mr *mr; in init_prefetch_work() local
1980 mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey); in init_prefetch_work()
1981 if (IS_ERR(mr)) { in init_prefetch_work()
1983 return PTR_ERR(mr); in init_prefetch_work()
1987 work->frags[i].mr = mr; in init_prefetch_work()
2003 struct mlx5_ib_mr *mr; in mlx5_ib_prefetch_sg_list() local
2005 mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey); in mlx5_ib_prefetch_sg_list()
2006 if (IS_ERR(mr)) in mlx5_ib_prefetch_sg_list()
2007 return PTR_ERR(mr); in mlx5_ib_prefetch_sg_list()
2008 ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length, in mlx5_ib_prefetch_sg_list()
2011 mlx5r_deref_odp_mkey(&mr->mmkey); in mlx5_ib_prefetch_sg_list()
2014 mlx5_update_odp_stats(mr, prefetch, ret); in mlx5_ib_prefetch_sg_list()
2015 mlx5r_deref_odp_mkey(&mr->mmkey); in mlx5_ib_prefetch_sg_list()
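mlx5_ib_prefetch_sg_list() takes a reference on each MR via get_prefetchable_mr(), faults the requested range, and drops the reference with mlx5r_deref_odp_mkey() on both the error and success paths. A sketch of that loop shape with invented demo_* types and callbacks:

#include <linux/err.h>
#include <linux/types.h>

struct demo_mr;                         /* opaque for this sketch */
struct demo_sge { u32 lkey; u64 addr; size_t length; };

static int demo_prefetch(struct demo_sge *sg_list, u32 num_sge,
                         struct demo_mr *(*get_mr)(u32 lkey),
                         int (*fault_mr)(struct demo_mr *mr, u64 addr,
                                         size_t length),
                         void (*put_mr)(struct demo_mr *mr))
{
        u32 i;

        for (i = 0; i < num_sge; i++) {
                struct demo_mr *mr = get_mr(sg_list[i].lkey);
                int ret;

                if (IS_ERR(mr))
                        return PTR_ERR(mr);

                ret = fault_mr(mr, sg_list[i].addr, sg_list[i].length);
                put_mr(mr);     /* reference dropped whether the fault worked or not */
                if (ret < 0)
                        return ret;
        }
        return 0;
}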