Lines Matching full:mr
130 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) in destroy_mkey() argument
132 WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))); in destroy_mkey()
134 return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); in destroy_mkey()
142 mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status); in create_mkey_warn()
305 /* Synchronously create a MR in the cache */
724 struct mlx5_ib_mr *mr; in _mlx5_mr_cache_alloc() local
727 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in _mlx5_mr_cache_alloc()
728 if (!mr) in _mlx5_mr_cache_alloc()
738 err = create_cache_mkey(ent, &mr->mmkey.key); in _mlx5_mr_cache_alloc()
743 kfree(mr); in _mlx5_mr_cache_alloc()
747 mr->mmkey.key = pop_mkey_locked(ent); in _mlx5_mr_cache_alloc()
751 mr->mmkey.cache_ent = ent; in _mlx5_mr_cache_alloc()
752 mr->mmkey.type = MLX5_MKEY_MR; in _mlx5_mr_cache_alloc()
753 mr->mmkey.rb_key = ent->rb_key; in _mlx5_mr_cache_alloc()
754 mr->mmkey.cacheable = true; in _mlx5_mr_cache_alloc()
755 init_waitqueue_head(&mr->mmkey.wait); in _mlx5_mr_cache_alloc()
756 return mr; in _mlx5_mr_cache_alloc()
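
The _mlx5_mr_cache_alloc() lines above show a pop-or-create pattern: the mr struct is allocated, an mkey is popped from the cache entry when one is queued, and otherwise one is created synchronously, with the struct freed on failure. Below is a minimal user-space sketch of that fallback; the keys[] array and create_key() are hypothetical stand-ins for pop_mkey_locked() and create_cache_mkey(), not driver code.

#include <stdlib.h>
#include <pthread.h>

struct key_cache {
	pthread_mutex_t lock;
	unsigned int keys[64];
	int count;                    /* number of pre-created keys available */
};

struct fake_mr {
	unsigned int key;
	int from_cache;
};

/* Hypothetical stand-in for create_cache_mkey(): pretend to create a key. */
static int create_key(unsigned int *key)
{
	*key = 0xdead0000u | (rand() & 0xffff);
	return 0;                     /* 0 on success, negative otherwise */
}

/* Pop a cached key if possible, otherwise fall back to synchronous creation. */
static struct fake_mr *cache_alloc(struct key_cache *cache)
{
	struct fake_mr *mr = calloc(1, sizeof(*mr));
	int err;

	if (!mr)
		return NULL;

	pthread_mutex_lock(&cache->lock);
	if (cache->count) {
		mr->key = cache->keys[--cache->count];  /* like pop_mkey_locked() */
		mr->from_cache = 1;
		pthread_mutex_unlock(&cache->lock);
		return mr;
	}
	pthread_mutex_unlock(&cache->lock);

	err = create_key(&mr->key);                     /* like create_cache_mkey() */
	if (err) {
		free(mr);                               /* mirrors the kfree(mr) path */
		return NULL;
	}
	return mr;
}
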
1026 struct mlx5_ib_mr *mr; in mlx5_ib_get_dma_mr() local
1031 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_get_dma_mr()
1032 if (!mr) in mlx5_ib_get_dma_mr()
1049 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dma_mr()
1054 mr->mmkey.type = MLX5_MKEY_MR; in mlx5_ib_get_dma_mr()
1055 mr->ibmr.lkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
1056 mr->ibmr.rkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
1057 mr->umem = NULL; in mlx5_ib_get_dma_mr()
1059 return &mr->ibmr; in mlx5_ib_get_dma_mr()
1065 kfree(mr); in mlx5_ib_get_dma_mr()
1088 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, in set_mr_fields() argument
1091 mr->ibmr.lkey = mr->mmkey.key; in set_mr_fields()
1092 mr->ibmr.rkey = mr->mmkey.key; in set_mr_fields()
1093 mr->ibmr.length = length; in set_mr_fields()
1094 mr->ibmr.device = &dev->ib_dev; in set_mr_fields()
1095 mr->ibmr.iova = iova; in set_mr_fields()
1096 mr->access_flags = access_flags; in set_mr_fields()
1117 struct mlx5_ib_mr *mr; in alloc_cacheable_mr() local
1133 * If the MR can't come from the cache then synchronously create an uncached in alloc_cacheable_mr()
1138 mr = reg_create(pd, umem, iova, access_flags, page_size, false, access_mode); in alloc_cacheable_mr()
1140 if (IS_ERR(mr)) in alloc_cacheable_mr()
1141 return mr; in alloc_cacheable_mr()
1142 mr->mmkey.rb_key = rb_key; in alloc_cacheable_mr()
1143 mr->mmkey.cacheable = true; in alloc_cacheable_mr()
1144 return mr; in alloc_cacheable_mr()
1147 mr = _mlx5_mr_cache_alloc(dev, ent, access_flags); in alloc_cacheable_mr()
1148 if (IS_ERR(mr)) in alloc_cacheable_mr()
1149 return mr; in alloc_cacheable_mr()
1151 mr->ibmr.pd = pd; in alloc_cacheable_mr()
1152 mr->umem = umem; in alloc_cacheable_mr()
1153 mr->page_shift = order_base_2(page_size); in alloc_cacheable_mr()
1154 set_mr_fields(dev, mr, umem->length, access_flags, iova); in alloc_cacheable_mr()
1156 return mr; in alloc_cacheable_mr()
1165 struct mlx5_ib_mr *mr; in reg_create_crossing_vhca_mr() local
1174 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in reg_create_crossing_vhca_mr()
1175 if (!mr) in reg_create_crossing_vhca_mr()
1198 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in reg_create_crossing_vhca_mr()
1202 mr->mmkey.type = MLX5_MKEY_MR; in reg_create_crossing_vhca_mr()
1203 set_mr_fields(dev, mr, length, access_flags, iova); in reg_create_crossing_vhca_mr()
1204 mr->ibmr.pd = pd; in reg_create_crossing_vhca_mr()
1206 mlx5_ib_dbg(dev, "crossing mkey = 0x%x\n", mr->mmkey.key); in reg_create_crossing_vhca_mr()
1208 return &mr->ibmr; in reg_create_crossing_vhca_mr()
1212 kfree(mr); in reg_create_crossing_vhca_mr()
1226 struct mlx5_ib_mr *mr; in reg_create() local
1238 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in reg_create()
1239 if (!mr) in reg_create()
1242 mr->ibmr.pd = pd; in reg_create()
1243 mr->access_flags = access_flags; in reg_create()
1244 mr->page_shift = order_base_2(page_size); in reg_create()
1261 mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas, in reg_create()
1285 get_octo_len(iova, umem->length, mr->page_shift) * 2); in reg_create()
1288 get_octo_len(iova, umem->length, mr->page_shift)); in reg_create()
1289 MLX5_SET(mkc, mkc, log_page_size, mr->page_shift); in reg_create()
1294 get_octo_len(iova, umem->length, mr->page_shift)); in reg_create()
1297 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in reg_create()
1302 mr->mmkey.type = MLX5_MKEY_MR; in reg_create()
1303 mr->mmkey.ndescs = get_octo_len(iova, umem->length, mr->page_shift); in reg_create()
1304 mr->umem = umem; in reg_create()
1305 set_mr_fields(dev, mr, umem->length, access_flags, iova); in reg_create()
1308 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); in reg_create()
1310 return mr; in reg_create()
1315 kfree(mr); in reg_create()
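
reg_create() sizes the mkey's translation table with get_octo_len(iova, umem->length, mr->page_shift). The helper below is a hedged reconstruction of what such a computation plausibly does, not code copied from the driver: count the pages spanned by [iova, iova + length) at the given page shift, then round up to 16-byte octowords that hold two 8-byte entries each.

#include <stdint.h>
#include <stdio.h>

/* Assumed behaviour of a get_octo_len()-style helper; rounding details are a guess. */
static uint64_t octo_len(uint64_t iova, uint64_t length, unsigned int page_shift)
{
	uint64_t page_size = 1ULL << page_shift;
	uint64_t offset = iova & (page_size - 1);           /* start offset within the first page */
	uint64_t npages = (length + offset + page_size - 1) >> page_shift;

	return (npages + 1) / 2;                            /* two 8-byte entries per 16-byte octoword */
}

int main(void)
{
	/* 1 MiB region starting 512 bytes into a 4 KiB page: 257 pages -> 129 octowords */
	printf("%llu\n", (unsigned long long)octo_len(0x200, 1 << 20, 12));
	return 0;
}
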
1324 struct mlx5_ib_mr *mr; in mlx5_ib_get_dm_mr() local
1329 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_get_dm_mr()
1330 if (!mr) in mlx5_ib_get_dm_mr()
1346 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dm_mr()
1352 set_mr_fields(dev, mr, length, acc, start_addr); in mlx5_ib_get_dm_mr()
1354 return &mr->ibmr; in mlx5_ib_get_dm_mr()
1360 kfree(mr); in mlx5_ib_get_dm_mr()
1419 struct mlx5_ib_mr *mr = NULL; in create_real_mr() local
1425 mr = alloc_cacheable_mr(pd, umem, iova, access_flags, in create_real_mr()
1432 mr = reg_create(pd, umem, iova, access_flags, page_size, in create_real_mr()
1436 if (IS_ERR(mr)) { in create_real_mr()
1438 return ERR_CAST(mr); in create_real_mr()
1441 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in create_real_mr()
1447 * If the MR was created with reg_create then it will be in create_real_mr()
1451 err = mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE); in create_real_mr()
1453 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in create_real_mr()
1457 return &mr->ibmr; in create_real_mr()
1466 struct mlx5_ib_mr *mr; in create_user_odp_mr() local
1481 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags); in create_user_odp_mr()
1482 if (IS_ERR(mr)) in create_user_odp_mr()
1483 return ERR_CAST(mr); in create_user_odp_mr()
1484 return &mr->ibmr; in create_user_odp_mr()
1496 mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags, in create_user_odp_mr()
1498 if (IS_ERR(mr)) { in create_user_odp_mr()
1500 return ERR_CAST(mr); in create_user_odp_mr()
1502 xa_init(&mr->implicit_children); in create_user_odp_mr()
1504 odp->private = mr; in create_user_odp_mr()
1505 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); in create_user_odp_mr()
1509 err = mlx5_ib_init_odp_mr(mr); in create_user_odp_mr()
1512 return &mr->ibmr; in create_user_odp_mr()
1515 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in create_user_odp_mr()
1549 struct mlx5_ib_mr *mr = umem_dmabuf->private; in mlx5_ib_dmabuf_invalidate_cb() local
1556 mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP); in mlx5_ib_dmabuf_invalidate_cb()
1572 struct mlx5_ib_mr *mr = NULL; in reg_user_mr_dmabuf() local
1596 mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr, in reg_user_mr_dmabuf()
1598 if (IS_ERR(mr)) { in reg_user_mr_dmabuf()
1600 return ERR_CAST(mr); in reg_user_mr_dmabuf()
1603 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in reg_user_mr_dmabuf()
1605 atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages); in reg_user_mr_dmabuf()
1606 umem_dmabuf->private = mr; in reg_user_mr_dmabuf()
1608 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); in reg_user_mr_dmabuf()
1612 mr->data_direct = true; in reg_user_mr_dmabuf()
1615 err = mlx5_ib_init_dmabuf_mr(mr); in reg_user_mr_dmabuf()
1618 return &mr->ibmr; in reg_user_mr_dmabuf()
1621 __mlx5_ib_dereg_mr(&mr->ibmr); in reg_user_mr_dmabuf()
1734 static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr, in can_use_umr_rereg_pas() argument
1739 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in can_use_umr_rereg_pas()
1742 if (!mr->mmkey.cache_ent) in can_use_umr_rereg_pas()
1750 return (mr->mmkey.cache_ent->rb_key.ndescs) >= in can_use_umr_rereg_pas()
1754 static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd, in umr_rereg_pas() argument
1758 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in umr_rereg_pas()
1760 struct ib_umem *old_umem = mr->umem; in umr_rereg_pas()
1764 * To keep everything simple the MR is revoked before we start to mess in umr_rereg_pas()
1766 * MR. in umr_rereg_pas()
1768 err = mlx5r_umr_revoke_mr(mr); in umr_rereg_pas()
1773 mr->ibmr.pd = pd; in umr_rereg_pas()
1777 mr->access_flags = access_flags; in umr_rereg_pas()
1781 mr->ibmr.iova = iova; in umr_rereg_pas()
1782 mr->ibmr.length = new_umem->length; in umr_rereg_pas()
1783 mr->page_shift = order_base_2(page_size); in umr_rereg_pas()
1784 mr->umem = new_umem; in umr_rereg_pas()
1785 err = mlx5r_umr_update_mr_pas(mr, upd_flags); in umr_rereg_pas()
1788 * The MR is revoked at this point so there is no issue to free in umr_rereg_pas()
1791 mr->umem = old_umem; in umr_rereg_pas()
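
The umr_rereg_pas() lines revoke the mkey before touching pd, iova, page_shift, or umem, and restore the old umem pointer if re-pushing the page tables fails. The following is a compact sketch of that ordering only; revoke() and update_pas() are no-op stubs standing in for mlx5r_umr_revoke_mr() and mlx5r_umr_update_mr_pas(), and the types are invented for illustration.

#include <stddef.h>

struct umem { size_t length; };
struct mr   { struct umem *umem; unsigned long iova; unsigned int page_shift; };

/* Stubs for the UMR revoke/update steps; always "succeed" in this sketch. */
static int revoke(struct mr *mr)     { (void)mr; return 0; }
static int update_pas(struct mr *mr) { (void)mr; return 0; }

static int rereg_pas(struct mr *mr, struct umem *new_umem,
		     unsigned long iova, unsigned int page_shift)
{
	struct umem *old_umem = mr->umem;
	int err;

	/* 1. Revoke first so hardware can no longer use the mkey while it is edited. */
	err = revoke(mr);
	if (err)
		return err;

	/* 2. Only now repoint the MR at the new memory. */
	mr->iova = iova;
	mr->page_shift = page_shift;
	mr->umem = new_umem;

	/* 3. Re-push the page tables; on failure restore old_umem so the caller
	 *    can release new_umem, mirroring the error path visible above. */
	err = update_pas(mr);
	if (err) {
		mr->umem = old_umem;
		return err;
	}
	return 0;
}
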
1807 struct mlx5_ib_mr *mr = to_mmr(ib_mr); in mlx5_ib_rereg_user_mr() local
1810 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) || mr->data_direct) in mlx5_ib_rereg_user_mr()
1822 new_access_flags = mr->access_flags; in mlx5_ib_rereg_user_mr()
1830 if (can_use_umr_rereg_access(dev, mr->access_flags, in mlx5_ib_rereg_user_mr()
1832 err = mlx5r_umr_rereg_pd_access(mr, new_pd, in mlx5_ib_rereg_user_mr()
1838 /* DM or ODP MR's don't have a normal umem so we can't re-use it */ in mlx5_ib_rereg_user_mr()
1839 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) in mlx5_ib_rereg_user_mr()
1843 * Only one active MR can refer to a umem at one time, revoke in mlx5_ib_rereg_user_mr()
1844 * the old MR before assigning the umem to the new one. in mlx5_ib_rereg_user_mr()
1846 err = mlx5r_umr_revoke_mr(mr); in mlx5_ib_rereg_user_mr()
1849 umem = mr->umem; in mlx5_ib_rereg_user_mr()
1850 mr->umem = NULL; in mlx5_ib_rereg_user_mr()
1853 return create_real_mr(new_pd, umem, mr->ibmr.iova, in mlx5_ib_rereg_user_mr()
1861 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) in mlx5_ib_rereg_user_mr()
1865 can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) { in mlx5_ib_rereg_user_mr()
1875 if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova, in mlx5_ib_rereg_user_mr()
1877 err = umr_rereg_pas(mr, new_pd, new_access_flags, flags, in mlx5_ib_rereg_user_mr()
1889 * Everything else has no state we can preserve, just create a new MR in mlx5_ib_rereg_user_mr()
1899 struct mlx5_ib_mr *mr, in mlx5_alloc_priv_descs() argument
1916 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL); in mlx5_alloc_priv_descs()
1917 if (!mr->descs_alloc) in mlx5_alloc_priv_descs()
1920 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); in mlx5_alloc_priv_descs()
1922 mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE); in mlx5_alloc_priv_descs()
1923 if (dma_mapping_error(ddev, mr->desc_map)) { in mlx5_alloc_priv_descs()
1930 kfree(mr->descs_alloc); in mlx5_alloc_priv_descs()
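
mlx5_alloc_priv_descs() keeps two pointers: descs_alloc, the raw allocation that is later kfree()'d, and descs, the same buffer rounded up to MLX5_UMR_ALIGN via PTR_ALIGN(). A user-space sketch of that over-allocate-and-align idiom follows; DESC_ALIGN is an assumed stand-in for MLX5_UMR_ALIGN and the dma_map_single() step is omitted.

#include <stdint.h>
#include <stdlib.h>

#define DESC_ALIGN 2048   /* stand-in for MLX5_UMR_ALIGN */

struct priv_descs {
	void *alloc;          /* pointer returned by the allocator (what gets freed) */
	void *descs;          /* aligned pointer actually handed to hardware */
};

/* Over-allocate by the alignment, then round the pointer up, keeping the
 * original allocation so it can be freed later (the descs_alloc/descs split). */
static int alloc_aligned_descs(struct priv_descs *p, size_t size)
{
	uintptr_t addr;

	p->alloc = calloc(1, size + DESC_ALIGN);
	if (!p->alloc)
		return -1;

	addr = ((uintptr_t)p->alloc + DESC_ALIGN - 1) & ~(uintptr_t)(DESC_ALIGN - 1);
	p->descs = (void *)addr;     /* equivalent of PTR_ALIGN(alloc, DESC_ALIGN) */
	return 0;
}

static void free_aligned_descs(struct priv_descs *p)
{
	free(p->alloc);              /* free the original pointer, never the aligned one */
	p->descs = NULL;
}
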
1936 mlx5_free_priv_descs(struct mlx5_ib_mr *mr) in mlx5_free_priv_descs() argument
1938 if (!mr->umem && !mr->data_direct && mr->descs) { in mlx5_free_priv_descs()
1939 struct ib_device *device = mr->ibmr.device; in mlx5_free_priv_descs()
1940 int size = mr->max_descs * mr->desc_size; in mlx5_free_priv_descs()
1943 dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, in mlx5_free_priv_descs()
1945 kfree(mr->descs_alloc); in mlx5_free_priv_descs()
1946 mr->descs = NULL; in mlx5_free_priv_descs()
1951 struct mlx5_ib_mr *mr) in cache_ent_find_and_store() argument
1957 if (mr->mmkey.cache_ent) { in cache_ent_find_and_store()
1958 spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); in cache_ent_find_and_store()
1959 mr->mmkey.cache_ent->in_use--; in cache_ent_find_and_store()
1964 ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key); in cache_ent_find_and_store()
1966 if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) { in cache_ent_find_and_store()
1971 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()
1972 spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); in cache_ent_find_and_store()
1978 ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false); in cache_ent_find_and_store()
1983 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()
1984 spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); in cache_ent_find_and_store()
1987 ret = push_mkey_locked(mr->mmkey.cache_ent, mr->mmkey.key); in cache_ent_find_and_store()
1988 spin_unlock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); in cache_ent_find_and_store()
1992 static int mlx5_ib_revoke_data_direct_mr(struct mlx5_ib_mr *mr) in mlx5_ib_revoke_data_direct_mr() argument
1994 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in mlx5_ib_revoke_data_direct_mr()
1995 struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem); in mlx5_ib_revoke_data_direct_mr()
1999 mr->revoked = true; in mlx5_ib_revoke_data_direct_mr()
2000 err = mlx5r_umr_revoke_mr(mr); in mlx5_ib_revoke_data_direct_mr()
2010 struct mlx5_ib_mr *mr, *next; in mlx5_ib_revoke_data_direct_mrs() local
2014 list_for_each_entry_safe(mr, next, &dev->data_direct_mr_list, dd_node) { in mlx5_ib_revoke_data_direct_mrs()
2015 list_del(&mr->dd_node); in mlx5_ib_revoke_data_direct_mrs()
2016 mlx5_ib_revoke_data_direct_mr(mr); in mlx5_ib_revoke_data_direct_mrs()
2020 static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) in mlx5_revoke_mr() argument
2022 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in mlx5_revoke_mr()
2023 struct mlx5_cache_ent *ent = mr->mmkey.cache_ent; in mlx5_revoke_mr()
2025 if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) { in mlx5_revoke_mr()
2026 ent = mr->mmkey.cache_ent; in mlx5_revoke_mr()
2041 mr->mmkey.cache_ent = NULL; in mlx5_revoke_mr()
2044 return destroy_mkey(dev, mr); in mlx5_revoke_mr()
2049 struct mlx5_ib_mr *mr = to_mmr(ibmr); in __mlx5_ib_dereg_mr() local
2054 * Any async use of the mr must hold the refcount, once the refcount in __mlx5_ib_dereg_mr()
2059 refcount_read(&mr->mmkey.usecount) != 0 && in __mlx5_ib_dereg_mr()
2060 xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))) in __mlx5_ib_dereg_mr()
2061 mlx5r_deref_wait_odp_mkey(&mr->mmkey); in __mlx5_ib_dereg_mr()
2064 xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in __mlx5_ib_dereg_mr()
2065 mr->sig, NULL, GFP_KERNEL); in __mlx5_ib_dereg_mr()
2067 if (mr->mtt_mr) { in __mlx5_ib_dereg_mr()
2068 rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); in __mlx5_ib_dereg_mr()
2071 mr->mtt_mr = NULL; in __mlx5_ib_dereg_mr()
2073 if (mr->klm_mr) { in __mlx5_ib_dereg_mr()
2074 rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); in __mlx5_ib_dereg_mr()
2077 mr->klm_mr = NULL; in __mlx5_ib_dereg_mr()
2081 mr->sig->psv_memory.psv_idx)) in __mlx5_ib_dereg_mr()
2083 mr->sig->psv_memory.psv_idx); in __mlx5_ib_dereg_mr()
2084 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in __mlx5_ib_dereg_mr()
2086 mr->sig->psv_wire.psv_idx); in __mlx5_ib_dereg_mr()
2087 kfree(mr->sig); in __mlx5_ib_dereg_mr()
2088 mr->sig = NULL; in __mlx5_ib_dereg_mr()
2092 rc = mlx5_revoke_mr(mr); in __mlx5_ib_dereg_mr()
2096 if (mr->umem) { in __mlx5_ib_dereg_mr()
2097 bool is_odp = is_odp_mr(mr); in __mlx5_ib_dereg_mr()
2100 atomic_sub(ib_umem_num_pages(mr->umem), in __mlx5_ib_dereg_mr()
2102 ib_umem_release(mr->umem); in __mlx5_ib_dereg_mr()
2104 mlx5_ib_free_odp_mr(mr); in __mlx5_ib_dereg_mr()
2107 if (!mr->mmkey.cache_ent) in __mlx5_ib_dereg_mr()
2108 mlx5_free_priv_descs(mr); in __mlx5_ib_dereg_mr()
2110 kfree(mr); in __mlx5_ib_dereg_mr()
2115 struct mlx5_ib_mr *mr) in dereg_crossing_data_direct_mr() argument
2117 struct mlx5_ib_mr *dd_crossed_mr = mr->dd_crossed_mr; in dereg_crossing_data_direct_mr()
2120 ret = __mlx5_ib_dereg_mr(&mr->ibmr); in dereg_crossing_data_direct_mr()
2135 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_dereg_mr() local
2138 if (mr->data_direct) in mlx5_ib_dereg_mr()
2139 return dereg_crossing_data_direct_mr(dev, mr); in mlx5_ib_dereg_mr()
2165 static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in _mlx5_alloc_mkey_descs() argument
2172 mr->access_mode = access_mode; in _mlx5_alloc_mkey_descs()
2173 mr->desc_size = desc_size; in _mlx5_alloc_mkey_descs()
2174 mr->max_descs = ndescs; in _mlx5_alloc_mkey_descs()
2176 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size); in _mlx5_alloc_mkey_descs()
2182 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in _mlx5_alloc_mkey_descs()
2186 mr->mmkey.type = MLX5_MKEY_MR; in _mlx5_alloc_mkey_descs()
2187 mr->ibmr.lkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
2188 mr->ibmr.rkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
2193 mlx5_free_priv_descs(mr); in _mlx5_alloc_mkey_descs()
2204 struct mlx5_ib_mr *mr; in mlx5_ib_alloc_pi_mr() local
2208 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_alloc_pi_mr()
2209 if (!mr) in mlx5_ib_alloc_pi_mr()
2212 mr->ibmr.pd = pd; in mlx5_ib_alloc_pi_mr()
2213 mr->ibmr.device = pd->device; in mlx5_ib_alloc_pi_mr()
2224 err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift, in mlx5_ib_alloc_pi_mr()
2229 mr->umem = NULL; in mlx5_ib_alloc_pi_mr()
2232 return mr; in mlx5_ib_alloc_pi_mr()
2237 kfree(mr); in mlx5_ib_alloc_pi_mr()
2241 static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_mem_reg_descs() argument
2244 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt), in mlx5_alloc_mem_reg_descs()
2249 static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_sg_gaps_descs() argument
2252 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm), in mlx5_alloc_sg_gaps_descs()
2256 static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_integrity_descs() argument
2265 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); in mlx5_alloc_integrity_descs()
2266 if (!mr->sig) in mlx5_alloc_integrity_descs()
2274 mr->sig->psv_memory.psv_idx = psv_index[0]; in mlx5_alloc_integrity_descs()
2275 mr->sig->psv_wire.psv_idx = psv_index[1]; in mlx5_alloc_integrity_descs()
2277 mr->sig->sig_status_checked = true; in mlx5_alloc_integrity_descs()
2278 mr->sig->sig_err_exists = false; in mlx5_alloc_integrity_descs()
2280 ++mr->sig->sigerr_count; in mlx5_alloc_integrity_descs()
2281 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
2284 if (IS_ERR(mr->klm_mr)) { in mlx5_alloc_integrity_descs()
2285 err = PTR_ERR(mr->klm_mr); in mlx5_alloc_integrity_descs()
2288 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
2291 if (IS_ERR(mr->mtt_mr)) { in mlx5_alloc_integrity_descs()
2292 err = PTR_ERR(mr->mtt_mr); in mlx5_alloc_integrity_descs()
2301 err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0, in mlx5_alloc_integrity_descs()
2306 err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in mlx5_alloc_integrity_descs()
2307 mr->sig, GFP_KERNEL)); in mlx5_alloc_integrity_descs()
2313 destroy_mkey(dev, mr); in mlx5_alloc_integrity_descs()
2314 mlx5_free_priv_descs(mr); in mlx5_alloc_integrity_descs()
2316 mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); in mlx5_alloc_integrity_descs()
2317 mr->mtt_mr = NULL; in mlx5_alloc_integrity_descs()
2319 mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); in mlx5_alloc_integrity_descs()
2320 mr->klm_mr = NULL; in mlx5_alloc_integrity_descs()
2322 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx)) in mlx5_alloc_integrity_descs()
2324 mr->sig->psv_memory.psv_idx); in mlx5_alloc_integrity_descs()
2325 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in mlx5_alloc_integrity_descs()
2327 mr->sig->psv_wire.psv_idx); in mlx5_alloc_integrity_descs()
2329 kfree(mr->sig); in mlx5_alloc_integrity_descs()
2341 struct mlx5_ib_mr *mr; in __mlx5_ib_alloc_mr() local
2345 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in __mlx5_ib_alloc_mr()
2346 if (!mr) in __mlx5_ib_alloc_mr()
2355 mr->ibmr.device = pd->device; in __mlx5_ib_alloc_mr()
2356 mr->umem = NULL; in __mlx5_ib_alloc_mr()
2360 err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen); in __mlx5_ib_alloc_mr()
2363 err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen); in __mlx5_ib_alloc_mr()
2366 err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg, in __mlx5_ib_alloc_mr()
2370 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type); in __mlx5_ib_alloc_mr()
2379 return &mr->ibmr; in __mlx5_ib_alloc_mr()
2384 kfree(mr); in __mlx5_ib_alloc_mr()
2486 * if the user bound an ODP MR to this MW. in mlx5_ib_dealloc_mw()
2509 pr_err("signature status check requested on a non-signature enabled MR\n"); in mlx5_ib_check_mr_status()
2540 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_pa_mr_sg_pi() local
2544 mr->meta_length = 0; in mlx5_ib_map_pa_mr_sg_pi()
2547 mr->mmkey.ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2550 mr->data_length = sg_dma_len(data_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2551 mr->data_iova = sg_dma_address(data_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2554 mr->meta_ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2559 mr->meta_length = sg_dma_len(meta_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2560 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2562 ibmr->length = mr->data_length + mr->meta_length; in mlx5_ib_map_pa_mr_sg_pi()
2569 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, in mlx5_ib_sg_to_klms() argument
2578 struct mlx5_klm *klms = mr->descs; in mlx5_ib_sg_to_klms()
2580 u32 lkey = mr->ibmr.pd->local_dma_lkey; in mlx5_ib_sg_to_klms()
2583 mr->ibmr.iova = sg_dma_address(sg) + sg_offset; in mlx5_ib_sg_to_klms()
2584 mr->ibmr.length = 0; in mlx5_ib_sg_to_klms()
2587 if (unlikely(i >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2592 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2600 mr->mmkey.ndescs = i; in mlx5_ib_sg_to_klms()
2601 mr->data_length = mr->ibmr.length; in mlx5_ib_sg_to_klms()
2607 if (unlikely(i + j >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2614 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2621 mr->meta_ndescs = j; in mlx5_ib_sg_to_klms()
2622 mr->meta_length = mr->ibmr.length - mr->data_length; in mlx5_ib_sg_to_klms()
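
mlx5_ib_sg_to_klms() walks the scatterlist, trims the first entry by sg_offset, stops once max_descs descriptors are filled, and accumulates the mapped length. A simplified, self-contained version of that data-segment loop is sketched below; big-endian byte-swapping and the metadata pass are left out, and the seg/klm types are reduced stand-ins.

#include <stdint.h>

struct seg { uint64_t addr; uint32_t len; };                 /* stand-in for a DMA-mapped SG entry */
struct klm { uint32_t bcount; uint32_t key; uint64_t va; };  /* simplified mlx5_klm */

/* Returns the number of descriptors written and the total mapped length. */
static int segs_to_klms(struct klm *klms, int max_descs, uint32_t lkey,
			const struct seg *sgs, int nsegs, uint32_t sg_offset,
			uint64_t *total_len)
{
	int i;

	*total_len = 0;
	for (i = 0; i < nsegs; i++) {
		if (i >= max_descs)
			break;                          /* ran out of descriptors */
		klms[i].va     = sgs[i].addr + sg_offset;
		klms[i].bcount = sgs[i].len  - sg_offset;
		klms[i].key    = lkey;
		*total_len    += sgs[i].len - sg_offset;
		sg_offset      = 0;                     /* only the first entry is offset */
	}
	return i;
}
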
2630 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_set_page() local
2633 if (unlikely(mr->mmkey.ndescs == mr->max_descs)) in mlx5_set_page()
2636 descs = mr->descs; in mlx5_set_page()
2637 descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); in mlx5_set_page()
2644 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_set_page_pi() local
2647 if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs)) in mlx5_set_page_pi()
2650 descs = mr->descs; in mlx5_set_page_pi()
2651 descs[mr->mmkey.ndescs + mr->meta_ndescs++] = in mlx5_set_page_pi()
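
mlx5_set_page() is the per-page callback used when building the page list: it bails out once ndescs reaches max_descs and otherwise appends the address (OR'ed with MLX5_EN_RD | MLX5_EN_WR in the driver). A stripped-down sketch of that callback, with the access bits omitted:

#include <stdint.h>

struct pl_mr {
	uint64_t *descs;
	int       ndescs;
	int       max_descs;
};

/* Mirror of the bounds check plus append in mlx5_set_page(). */
static int set_page(struct pl_mr *mr, uint64_t addr)
{
	if (mr->ndescs == mr->max_descs)
		return -1;               /* no room left: the -ENOMEM-style bail-out */
	mr->descs[mr->ndescs++] = addr;
	return 0;
}
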
2663 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mtt_mr_sg_pi() local
2664 struct mlx5_ib_mr *pi_mr = mr->mtt_mr; in mlx5_ib_map_mtt_mr_sg_pi()
2704 * In order to use one MTT MR for data and metadata, we register in mlx5_ib_map_mtt_mr_sg_pi()
2706 * the metadata (the sig MR will verify that the HW will access in mlx5_ib_map_mtt_mr_sg_pi()
2728 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_klm_mr_sg_pi() local
2729 struct mlx5_ib_mr *pi_mr = mr->klm_mr; in mlx5_ib_map_klm_mr_sg_pi()
2761 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mr_sg_pi() local
2767 mr->mmkey.ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2768 mr->data_length = 0; in mlx5_ib_map_mr_sg_pi()
2769 mr->data_iova = 0; in mlx5_ib_map_mr_sg_pi()
2770 mr->meta_ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2771 mr->pi_iova = 0; in mlx5_ib_map_mr_sg_pi()
2791 pi_mr = mr->mtt_mr; in mlx5_ib_map_mr_sg_pi()
2798 pi_mr = mr->klm_mr; in mlx5_ib_map_mr_sg_pi()
2808 mr->pi_mr = pi_mr; in mlx5_ib_map_mr_sg_pi()
2812 ibmr->sig_attrs->meta_length = mr->meta_length; in mlx5_ib_map_mr_sg_pi()
2820 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mr_sg() local
2823 mr->mmkey.ndescs = 0; in mlx5_ib_map_mr_sg()
2825 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2826 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()
2829 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) in mlx5_ib_map_mr_sg()
2830 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0, in mlx5_ib_map_mr_sg()
2836 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2837 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()
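
mlx5_ib_map_mr_sg() brackets the descriptor fill with ib_dma_sync_single_for_cpu() and ib_dma_sync_single_for_device(), because the descriptor buffer stays DMA-mapped for the device (DMA_TO_DEVICE at allocation time). A sketch of that sync-fill-sync pattern follows; sync_for_cpu()/sync_for_device() are no-op stand-ins for the ib_dma_sync_single_* calls, and the types are invented for illustration.

#include <stdint.h>
#include <stddef.h>

static void sync_for_cpu(void *buf, size_t len)    { (void)buf; (void)len; }
static void sync_for_device(void *buf, size_t len) { (void)buf; (void)len; }

struct desc_mr { uint64_t *descs; int ndescs; int max_descs; };

static int map_descs(struct desc_mr *mr, const uint64_t *pages, int npages)
{
	int i;

	/* Hand the DMA-mapped descriptor buffer back to the CPU before writing. */
	sync_for_cpu(mr->descs, (size_t)mr->max_descs * sizeof(*mr->descs));

	mr->ndescs = 0;
	for (i = 0; i < npages && mr->ndescs < mr->max_descs; i++)
		mr->descs[mr->ndescs++] = pages[i];

	/* Flush the updated descriptors back toward the device. */
	sync_for_device(mr->descs, (size_t)mr->max_descs * sizeof(*mr->descs));
	return mr->ndescs;               /* number of pages actually mapped */
}
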