Lines Matching full:mr
35 static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt) in populate_mtts() argument
38 int nsg = mr->nsg; in populate_mtts()
44 for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) { in populate_mtts()
47 nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size)) in populate_mtts()
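The populate_mtts() matches above show a scatterlist walk that slices each DMA-mapped segment into 2^log_size chunks and writes one MTT entry per chunk, stopping once nsg entries have been produced. A minimal, self-contained model of that slicing in plain C (seg, populate_model and the flat mtt array are illustrative stand-ins, not the kernel's sg table or the MLX5_ADDR_OF() buffer):

	#include <stdint.h>

	struct seg {
		uint64_t dma_addr;	/* start of a DMA-mapped segment */
		uint64_t dma_len;	/* length of that segment in bytes */
	};

	/* Emit one address per 2^log_size chunk of each segment, at most nsg entries. */
	static void populate_model(const struct seg *segs, int nent, int nsg,
				   unsigned int log_size, uint64_t *mtt)
	{
		uint64_t chunk = 1ULL << log_size;
		int out = 0;

		for (int i = 0; i < nent && nsg > 0; i++) {
			uint64_t addr = segs[i].dma_addr;
			uint64_t len = segs[i].dma_len;

			while (nsg > 0 && len > 0) {
				mtt[out++] = addr;	/* kernel stores cpu_to_be64(dma_addr) here */
				nsg--;
				addr += chunk;
				len = len > chunk ? len - chunk : 0;
			}
		}
	}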
64 struct mlx5_vdpa_direct_mr *mr, in fill_create_direct_mr() argument
72 MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO)); in fill_create_direct_mr()
73 MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO)); in fill_create_direct_mr()
77 MLX5_SET64(mkc, mkc, start_addr, mr->offset); in fill_create_direct_mr()
78 MLX5_SET64(mkc, mkc, len, mr->end - mr->start); in fill_create_direct_mr()
79 MLX5_SET(mkc, mkc, log_page_size, mr->log_size); in fill_create_direct_mr()
81 get_octo_len(mr->end - mr->start, mr->log_size)); in fill_create_direct_mr()
83 get_octo_len(mr->end - mr->start, mr->log_size)); in fill_create_direct_mr()
84 populate_mtts(mr, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt)); in fill_create_direct_mr()
91 struct mlx5_vdpa_direct_mr *mr, in create_direct_mr_end() argument
96 mr->mr = mlx5_idx_to_mkey(mkey_index); in create_direct_mr_end()
100 struct mlx5_vdpa_direct_mr *mr, in fill_destroy_direct_mr() argument
107 MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mr->mr)); in fill_destroy_direct_mr()
110 static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr) in destroy_direct_mr() argument
112 if (!mr->mr) in destroy_direct_mr()
115 mlx5_vdpa_destroy_mkey(mvdev, mr->mr); in destroy_direct_mr()
118 static u64 map_start(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr) in map_start() argument
120 return max_t(u64, map->start, mr->start); in map_start()
123 static u64 map_end(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr) in map_end() argument
125 return min_t(u64, map->last + 1, mr->end); in map_end()
128 static u64 maplen(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr) in maplen() argument
130 return map_end(map, mr) - map_start(map, mr); in maplen()
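map_start(), map_end() and maplen() clamp a vhost iotlb map (inclusive [start, last]) against the direct MR's half-open [start, end) range and return the overlap length. The same arithmetic as a self-contained example (map_range and mr_range are illustrative stand-ins for struct vhost_iotlb_map and struct mlx5_vdpa_direct_mr):

	#include <stdint.h>
	#include <stdio.h>

	struct map_range { uint64_t start, last; };	/* inclusive [start, last] */
	struct mr_range  { uint64_t start, end; };	/* half-open [start, end)  */

	static uint64_t map_start(const struct map_range *map, const struct mr_range *mr)
	{
		return map->start > mr->start ? map->start : mr->start;
	}

	static uint64_t map_end(const struct map_range *map, const struct mr_range *mr)
	{
		return map->last + 1 < mr->end ? map->last + 1 : mr->end;
	}

	static uint64_t maplen(const struct map_range *map, const struct mr_range *mr)
	{
		return map_end(map, mr) - map_start(map, mr);
	}

	int main(void)
	{
		struct map_range map = { .start = 0x1000, .last = 0x4fff };
		struct mr_range mr = { .start = 0x2000, .end = 0x4000 };

		/* Overlap is [0x2000, 0x4000): prints 0x2000. */
		printf("overlap: 0x%llx\n", (unsigned long long)maplen(&map, &mr));
		return 0;
	}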
189 klm->key = cpu_to_be32(dmr->mr); in fill_indir()
208 static int create_direct_keys(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr) in create_direct_keys() argument
215 cmds = kvcalloc(mr->num_directs, sizeof(*cmds), GFP_KERNEL); in create_direct_keys()
219 list_for_each_entry(dmr, &mr->head, list) { in create_direct_keys()
241 err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, mr->num_directs); in create_direct_keys()
249 list_for_each_entry(dmr, &mr->head, list) { in create_direct_keys()
279 static int destroy_direct_keys(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr) in destroy_direct_keys() argument
287 cmds = kvcalloc(mr->num_directs, sizeof(*cmds), GFP_KERNEL); in destroy_direct_keys()
288 cmd_mem = kvcalloc(mr->num_directs, sizeof(*cmd_mem), GFP_KERNEL); in destroy_direct_keys()
292 list_for_each_entry(dmr, &mr->head, list) { in destroy_direct_keys()
301 err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, mr->num_directs); in destroy_direct_keys()
309 list_for_each_entry(dmr, &mr->head, list) { in destroy_direct_keys()
312 dmr->mr = 0; in destroy_direct_keys()
323 static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr) in create_indirect_key() argument
332 start = indir_start_addr(mr); in create_indirect_key()
333 len = indir_len(mr); in create_indirect_key()
337 inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + klm_byte_size(mr->num_klms); in create_indirect_key()
351 MLX5_SET(mkc, mkc, translations_octword_size, klm_byte_size(mr->num_klms) / 16); in create_indirect_key()
352 MLX5_SET(create_mkey_in, in, translations_octword_actual_size, mr->num_klms); in create_indirect_key()
353 fill_indir(mvdev, mr, in); in create_indirect_key()
354 err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen); in create_indirect_key()
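create_indirect_key() sizes the mkey command from the KLM count: a KLM entry is 16 bytes (one octword), so klm_byte_size(num_klms) / 16 becomes translations_octword_size while num_klms itself is programmed as translations_octword_actual_size. A small sizing sketch, assuming klm_byte_size() is a plain multiply by the 16-byte entry size (the real helper is not among the matches and may round the count up for alignment):

	#include <stddef.h>

	#define KLM_ENTRY_SIZE 16	/* bytes per KLM entry, i.e. one octword */

	/* Assumed model of klm_byte_size(); the kernel helper may align num_klms up. */
	static size_t klm_byte_size(int num_klms)
	{
		return (size_t)num_klms * KLM_ENTRY_SIZE;
	}

	/* Command length: fixed create_mkey_in header plus the trailing KLM array. */
	static size_t indirect_mkey_inlen(size_t create_mkey_in_sz, int num_klms)
	{
		return create_mkey_in_sz + klm_byte_size(num_klms);
	}

	/* Value programmed into translations_octword_size. */
	static int klm_octwords(int num_klms)
	{
		return (int)(klm_byte_size(num_klms) / KLM_ENTRY_SIZE);
	}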
364 static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr, in map_direct_mr() argument
381 for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1); in map_direct_mr()
382 map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) { in map_direct_mr()
383 size = maplen(map, mr); in map_direct_mr()
390 nsg = MLX5_DIV_ROUND_UP_POW2(mr->end - mr->start, log_entity_size); in map_direct_mr()
392 err = sg_alloc_table(&mr->sg_head, nsg, GFP_KERNEL); in map_direct_mr()
396 sg = mr->sg_head.sgl; in map_direct_mr()
397 for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1); in map_direct_mr()
398 map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) { in map_direct_mr()
399 offset = mr->start > map->start ? mr->start - map->start : 0; in map_direct_mr()
401 paend = map->addr + offset + maplen(map, mr); in map_direct_mr()
417 mr->log_size = log_entity_size; in map_direct_mr()
418 mr->nsg = nsg; in map_direct_mr()
419 mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0); in map_direct_mr()
420 if (!mr->nent) { in map_direct_mr()
428 sg_free_table(&mr->sg_head); in map_direct_mr()
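map_direct_mr() walks the iotlb maps overlapping [mr->start, mr->end), sizes the sg table with a power-of-two round-up of the range, and fills each entry from the map's backing address plus the clamped offset before dma_map_sg_attrs(). The sizing and offset math as a self-contained sketch (map_range, mr_range and the helper names are illustrative stand-ins, not kernel types or functions):

	#include <stdint.h>

	struct map_range { uint64_t start, last, addr; };	/* iotlb entry: IOVA range + backing address */
	struct mr_range  { uint64_t start, end; };		/* direct MR: half-open IOVA range */

	/* Number of 2^log_entity_size entries needed to cover the MR range
	 * (mirrors MLX5_DIV_ROUND_UP_POW2(mr->end - mr->start, log_entity_size)). */
	static uint64_t nsg_for_range(const struct mr_range *mr, unsigned int log_entity_size)
	{
		uint64_t size = mr->end - mr->start;
		uint64_t entity = 1ULL << log_entity_size;

		return (size + entity - 1) >> log_entity_size;
	}

	/* Offset into the map's backing memory where the MR begins
	 * (0 when the map starts inside the MR). */
	static uint64_t map_offset(const struct map_range *map, const struct mr_range *mr)
	{
		return mr->start > map->start ? mr->start - map->start : 0;
	}

	/* Backing address of the first byte contributed by this map, used to fill the sg entry. */
	static uint64_t overlap_addr(const struct map_range *map, const struct mr_range *mr)
	{
		return map->addr + map_offset(map, mr);
	}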
432 static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr) in unmap_direct_mr() argument
436 destroy_direct_mr(mvdev, mr); in unmap_direct_mr()
437 dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0); in unmap_direct_mr()
438 sg_free_table(&mr->sg_head); in unmap_direct_mr()
442 struct mlx5_vdpa_mr *mr, in add_direct_chain() argument
475 mr->num_directs++; in add_direct_chain()
476 mr->num_klms++; in add_direct_chain()
479 list_splice_tail(&tmp, &mr->head); in add_direct_chain()
483 list_for_each_entry_safe(dmr, n, &mr->head, list) { in add_direct_chain()
498 struct mlx5_vdpa_mr *mr, in create_user_mr() argument
512 INIT_LIST_HEAD(&mr->head); in create_user_mr()
526 mr->num_klms += nnuls; in create_user_mr()
528 err = add_direct_chain(mvdev, mr, ps, pe - ps, pperm, iotlb); in create_user_mr()
537 err = add_direct_chain(mvdev, mr, ps, pe - ps, pperm, iotlb); in create_user_mr()
541 err = create_direct_keys(mvdev, mr); in create_user_mr()
549 err = create_indirect_key(mvdev, mr); in create_user_mr()
553 mr->user_mr = true; in create_user_mr()
557 list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) { in create_user_mr()
565 static int create_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr) in create_dma_mr() argument
585 err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen); in create_dma_mr()
587 mr->user_mr = false; in create_dma_mr()
593 static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr) in destroy_dma_mr() argument
595 mlx5_vdpa_destroy_mkey(mvdev, mr->mkey); in destroy_dma_mr()
627 static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr) in destroy_user_mr() argument
632 destroy_indirect_key(mvdev, mr); in destroy_user_mr()
633 destroy_direct_keys(mvdev, mr); in destroy_user_mr()
634 list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) { in destroy_user_mr()
641 static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr) in _mlx5_vdpa_destroy_mr() argument
643 if (WARN_ON(!mr)) in _mlx5_vdpa_destroy_mr()
646 if (mr->user_mr) in _mlx5_vdpa_destroy_mr()
647 destroy_user_mr(mvdev, mr); in _mlx5_vdpa_destroy_mr()
649 destroy_dma_mr(mvdev, mr); in _mlx5_vdpa_destroy_mr()
651 vhost_iotlb_free(mr->iotlb); in _mlx5_vdpa_destroy_mr()
653 list_del(&mr->mr_list); in _mlx5_vdpa_destroy_mr()
655 kfree(mr); in _mlx5_vdpa_destroy_mr()
659 * This large delay is a simple way to prevent the MR cleanup from blocking
660 * .set_map() MR creation in this scenario.
667 struct mlx5_vdpa_mr *mr, *tmp; in mlx5_vdpa_mr_gc_handler() local
682 list_for_each_entry_safe(mr, tmp, &mres->mr_gc_list_head, mr_list) { in mlx5_vdpa_mr_gc_handler()
683 _mlx5_vdpa_destroy_mr(mvdev, mr); in mlx5_vdpa_mr_gc_handler()
690 struct mlx5_vdpa_mr *mr) in _mlx5_vdpa_put_mr() argument
694 if (!mr) in _mlx5_vdpa_put_mr()
697 if (refcount_dec_and_test(&mr->refcount)) { in _mlx5_vdpa_put_mr()
698 list_move_tail(&mr->mr_list, &mres->mr_gc_list_head); in _mlx5_vdpa_put_mr()
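The comment at 659-660 together with the mlx5_vdpa_mr_gc_handler() and _mlx5_vdpa_put_mr() matches describe a deferred-destroy scheme: dropping the last reference only moves the MR onto a garbage-collection list, and a later worker pass tears the entries down, so .set_map() MR creation is never blocked by cleanup. A minimal userspace model of that pattern (plain C with an intrusive singly-linked list; the kernel uses refcount_t, list_move_tail() and a delayed work item instead):

	#include <stdlib.h>

	struct mr {
		int refcount;
		struct mr *next;	/* link on the gc list once the last reference is gone */
	};

	static struct mr *gc_list;

	static void mr_put(struct mr *m)
	{
		if (!m)
			return;
		if (--m->refcount == 0) {
			/* Defer the (potentially slow) teardown to the gc pass. */
			m->next = gc_list;
			gc_list = m;
		}
	}

	/* Runs later, e.g. from a delayed worker; stands in for mlx5_vdpa_mr_gc_handler(). */
	static void mr_gc_handler(void)
	{
		while (gc_list) {
			struct mr *m = gc_list;

			gc_list = m->next;
			free(m);	/* stands in for _mlx5_vdpa_destroy_mr() */
		}
	}

	int main(void)
	{
		struct mr *m = calloc(1, sizeof(*m));

		if (!m)
			return 1;
		m->refcount = 1;
		mr_put(m);		/* last reference: queued for gc, not freed */
		mr_gc_handler();	/* later: actually destroyed */
		return 0;
	}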
705 struct mlx5_vdpa_mr *mr) in mlx5_vdpa_put_mr() argument
708 _mlx5_vdpa_put_mr(mvdev, mr); in mlx5_vdpa_put_mr()
713 struct mlx5_vdpa_mr *mr) in _mlx5_vdpa_get_mr() argument
715 if (!mr) in _mlx5_vdpa_get_mr()
718 refcount_inc(&mr->refcount); in _mlx5_vdpa_get_mr()
722 struct mlx5_vdpa_mr *mr) in mlx5_vdpa_get_mr() argument
725 _mlx5_vdpa_get_mr(mvdev, mr); in mlx5_vdpa_get_mr()
733 struct mlx5_vdpa_mr *old_mr = mvdev->mres.mr[asid]; in mlx5_vdpa_update_mr()
738 mvdev->mres.mr[asid] = new_mr; in mlx5_vdpa_update_mr()
745 struct mlx5_vdpa_mr *mr; in mlx5_vdpa_show_mr_leaks() local
749 list_for_each_entry(mr, &mvdev->mres.mr_list_head, mr_list) { in mlx5_vdpa_show_mr_leaks()
752 "mr: %p, mkey: 0x%x, refcount: %u\n", in mlx5_vdpa_show_mr_leaks()
753 mr, mr->mkey, refcount_read(&mr->refcount)); in mlx5_vdpa_show_mr_leaks()
774 struct mlx5_vdpa_mr *mr, in _mlx5_vdpa_create_mr() argument
780 err = create_user_mr(mvdev, mr, iotlb); in _mlx5_vdpa_create_mr()
782 err = create_dma_mr(mvdev, mr); in _mlx5_vdpa_create_mr()
787 mr->iotlb = vhost_iotlb_alloc(0, 0); in _mlx5_vdpa_create_mr()
788 if (!mr->iotlb) { in _mlx5_vdpa_create_mr()
793 err = dup_iotlb(mr->iotlb, iotlb); in _mlx5_vdpa_create_mr()
797 list_add_tail(&mr->mr_list, &mvdev->mres.mr_list_head); in _mlx5_vdpa_create_mr()
802 vhost_iotlb_free(mr->iotlb); in _mlx5_vdpa_create_mr()
806 destroy_user_mr(mvdev, mr); in _mlx5_vdpa_create_mr()
808 destroy_dma_mr(mvdev, mr); in _mlx5_vdpa_create_mr()
816 struct mlx5_vdpa_mr *mr; in mlx5_vdpa_create_mr() local
819 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_vdpa_create_mr()
820 if (!mr) in mlx5_vdpa_create_mr()
824 err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb); in mlx5_vdpa_create_mr()
830 refcount_set(&mr->refcount, 1); in mlx5_vdpa_create_mr()
832 return mr; in mlx5_vdpa_create_mr()
835 kfree(mr); in mlx5_vdpa_create_mr()
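mlx5_vdpa_create_mr() allocates the MR, builds either a user MR (when an iotlb is supplied) or a plain DMA MR, takes the initial reference, and frees the allocation on failure. The shape of that path as a simplified sketch (build_mr is a hypothetical stand-in for _mlx5_vdpa_create_mr(); the kernel returns ERR_PTR(err) rather than NULL, as the IS_ERR()/PTR_ERR() matches below show):

	#include <stdlib.h>

	struct mr { int refcount; };

	/* Hypothetical stand-in for _mlx5_vdpa_create_mr(): builds a user MR when
	 * iotlb is non-NULL, otherwise a plain DMA MR. Returns 0 or a negative errno. */
	static int build_mr(struct mr *m, const void *iotlb)
	{
		(void)m;
		(void)iotlb;
		return 0;
	}

	static struct mr *create_mr(const void *iotlb)
	{
		struct mr *m = calloc(1, sizeof(*m));

		if (!m)
			return NULL;		/* kernel: ERR_PTR(-ENOMEM) */

		if (build_mr(m, iotlb)) {
			free(m);		/* unwind the allocation on failure */
			return NULL;		/* kernel: ERR_PTR(err) */
		}

		m->refcount = 1;		/* initial reference, dropped via the put/gc path */
		return m;
	}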
860 struct mlx5_vdpa_mr *mr; in mlx5_vdpa_create_dma_mr() local
862 mr = mlx5_vdpa_create_mr(mvdev, NULL); in mlx5_vdpa_create_dma_mr()
863 if (IS_ERR(mr)) in mlx5_vdpa_create_dma_mr()
864 return PTR_ERR(mr); in mlx5_vdpa_create_dma_mr()
866 mlx5_vdpa_update_mr(mvdev, mr, 0); in mlx5_vdpa_create_dma_mr()
880 mlx5_vdpa_warn(mvdev, "create DMA MR failed\n"); in mlx5_vdpa_reset_mr()