Lines matching refs:access_flags

58 				     u64 iova, int access_flags,
246 set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0, in set_cache_mkc()
640 res = key1.access_flags - key2.access_flags; in cache_ent_key_cmp()
713 smallest->rb_key.access_flags == rb_key.access_flags && in mkey_cache_ent_from_rb_key()
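
The comparison at line 640 and the exact-match test at line 713 indicate that access_flags is one ordered field of the red-black-tree key used to bucket cached mkeys. Below is a minimal user-space model of that comparator; the struct layout (ats, access_flags, ndescs) and the field ordering are assumptions inferred from these references, not the kernel definitions. The functions here come from the mlx5 RDMA driver's memory-region code (drivers/infiniband/hw/mlx5 in the kernel tree).

/* Hypothetical model of the mkey-cache rb-tree key comparison; the
 * field set and ordering are inferred from lines 640 and 713. */
#include <stdio.h>

struct cache_rb_key {
    int ats;          /* address translation services enabled */
    int access_flags; /* only the "unchangeable" flag bits */
    int ndescs;       /* number of descriptors (entry size) */
};

static int cache_ent_key_cmp(struct cache_rb_key k1, struct cache_rb_key k2)
{
    int res;

    res = k1.ats - k2.ats;
    if (res)
        return res;

    res = k1.access_flags - k2.access_flags;
    if (res)
        return res;

    return k1.ndescs - k2.ndescs;
}

int main(void)
{
    struct cache_rb_key a = { .ats = 0, .access_flags = 0x3, .ndescs = 8 };
    struct cache_rb_key b = { .ats = 0, .access_flags = 0x3, .ndescs = 16 };

    /* Same ats/access_flags, so ordering falls through to ndescs. */
    printf("cmp = %d\n", cache_ent_key_cmp(a, b)); /* negative */
    return 0;
}

Comparing ndescs last lets a lookup like the one at line 713 demand an exact match on ats and access_flags while settling for the closest sufficient size.
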
722 int access_flags) in _mlx5_mr_cache_alloc() argument
760 int access_flags) in get_unchangeable_access_flags() argument
764 if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) && in get_unchangeable_access_flags()
769 if ((access_flags & IB_ACCESS_RELAXED_ORDERING) && in get_unchangeable_access_flags()
774 if ((access_flags & IB_ACCESS_RELAXED_ORDERING) && in get_unchangeable_access_flags()
784 int access_flags, int access_mode, in mlx5_mr_cache_alloc() argument
790 .access_flags = get_unchangeable_access_flags(dev, access_flags) in mlx5_mr_cache_alloc()
797 return _mlx5_mr_cache_alloc(dev, ent, access_flags); in mlx5_mr_cache_alloc()
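
Lines 760-797 suggest that only access bits the device cannot later rewrite through UMR (remote atomics, relaxed ordering) are folded into the cache key, so one cached mkey can serve requests that differ only in UMR-patchable bits. Below is a sketch of that filtering, with a hypothetical dev_caps struct standing in for the driver's real device-capability reads; note the driver tests the relaxed-ordering read and write capabilities separately (lines 769 and 774), which this sketch merges into one condition.

#include <stdbool.h>
#include <stdio.h>

#define IB_ACCESS_REMOTE_ATOMIC    (1 << 3)  /* values mirror rdma/ib_verbs.h */
#define IB_ACCESS_RELAXED_ORDERING (1 << 20)

/* Hypothetical stand-in for the device-capability bits the driver
 * actually queries from firmware. */
struct dev_caps {
    bool umr_modify_atomic_disabled;
    bool relaxed_ordering_write_umr;
    bool relaxed_ordering_read_umr;
};

/* Keep only the bits a UMR cannot rewrite on this device; everything
 * else may differ between a cached mkey and the request, because a
 * later UMR can patch it in. */
static int get_unchangeable_access_flags(const struct dev_caps *caps,
                                         int access_flags)
{
    int ret = 0;

    if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
        caps->umr_modify_atomic_disabled)
        ret |= IB_ACCESS_REMOTE_ATOMIC;

    if ((access_flags & IB_ACCESS_RELAXED_ORDERING) &&
        (!caps->relaxed_ordering_write_umr ||
         !caps->relaxed_ordering_read_umr))
        ret |= IB_ACCESS_RELAXED_ORDERING;

    return ret;
}

int main(void)
{
    struct dev_caps caps = {
        .umr_modify_atomic_disabled = true,
        .relaxed_ordering_write_umr = true,
        .relaxed_ordering_read_umr  = true,
    };
    int flags = IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_RELAXED_ORDERING;

    /* Prints 0x8: only the atomic bit keys the cache here, since this
     * (hypothetical) device can patch relaxed ordering via UMR. */
    printf("0x%x\n", get_unchangeable_access_flags(&caps, flags));
    return 0;
}
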
1089 u64 length, int access_flags, u64 iova) in set_mr_fields() argument
1096 mr->access_flags = access_flags; in set_mr_fields()
1112 int access_flags, int access_mode) in alloc_cacheable_mr() argument
1129 rb_key.ats = mlx5_umem_needs_ats(dev, umem, access_flags); in alloc_cacheable_mr()
1130 rb_key.access_flags = get_unchangeable_access_flags(dev, access_flags); in alloc_cacheable_mr()
1138 mr = reg_create(pd, umem, iova, access_flags, page_size, false, access_mode); in alloc_cacheable_mr()
1147 mr = _mlx5_mr_cache_alloc(dev, ent, access_flags); in alloc_cacheable_mr()
1154 set_mr_fields(dev, mr, umem->length, access_flags, iova); in alloc_cacheable_mr()
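
Lines 1112-1154 outline the cacheable-MR path: build the rb key from the ATS state and the unchangeable access bits (lines 1129-1130), look up a bucket, and either take a cached mkey (line 1147) or fall back to a fresh reg_create() (line 1138), finishing with set_mr_fields() (line 1154). A toy model of that decision follows; the bucket table, sizes, and helper names are invented for illustration.

#include <stdio.h>

struct cache_ent { int ndescs; int access_flags; };

/* Pretend lookup: exact match on the unchangeable flags, closest
 * sufficient size, mirroring the rb-key semantics sketched above. */
static struct cache_ent *cache_lookup(int unchangeable_flags, int ndescs)
{
    static struct cache_ent buckets[] = { {8, 0}, {16, 0} };
    unsigned int i;

    for (i = 0; i < sizeof(buckets) / sizeof(buckets[0]); i++)
        if (buckets[i].access_flags == unchangeable_flags &&
            buckets[i].ndescs >= ndescs)
            return &buckets[i];
    return NULL;
}

static void alloc_cacheable_mr(int unchangeable_flags, int ndescs)
{
    struct cache_ent *ent = cache_lookup(unchangeable_flags, ndescs);

    if (!ent) {
        /* no matching bucket: create a one-off, uncached mkey */
        printf("no bucket: reg_create() fallback\n");
        return;
    }
    /* take an mkey from the bucket, then fill in the MR fields */
    printf("cache hit: bucket ndescs=%d\n", ent->ndescs);
}

int main(void)
{
    alloc_cacheable_mr(0, 10);      /* closest bucket: 16 descriptors */
    alloc_cacheable_mr(1 << 3, 10); /* flags not cached: fallback */
    return 0;
}
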
1160 reg_create_crossing_vhca_mr(struct ib_pd *pd, u64 iova, u64 length, int access_flags, in reg_create_crossing_vhca_mr() argument
1193 set_mkc_access_pd_addr_fields(mkc, access_flags, 0, pd); in reg_create_crossing_vhca_mr()
1203 set_mr_fields(dev, mr, length, access_flags, iova); in reg_create_crossing_vhca_mr()
1221 u64 iova, int access_flags, in reg_create() argument
1243 mr->access_flags = access_flags; in reg_create()
1257 if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND || ksm_mode)) { in reg_create()
1271 set_mkc_access_pd_addr_fields(mkc, access_flags, iova, in reg_create()
1290 if (mlx5_umem_needs_ats(dev, umem, access_flags)) in reg_create()
1305 set_mr_fields(dev, mr, umem->length, access_flags, iova); in reg_create()
1392 if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS) in mlx5_ib_reg_dm_mr()
1402 if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS) in mlx5_ib_reg_dm_mr()
1412 attr->access_flags, mode); in mlx5_ib_reg_dm_mr()
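
Lines 1392-1412 validate the requested access_flags against a per-memory-type allow-mask (MEMIC vs SW ICM) before registering a device-memory MR. The pattern is a reject-any-bit-outside-the-mask check; the mask contents below are hypothetical, only the shape of the test comes from the listing.

#include <errno.h>
#include <stdio.h>

#define ACCESS_LOCAL_WRITE  (1 << 0)
#define ACCESS_REMOTE_WRITE (1 << 1)
#define ACCESS_REMOTE_READ  (1 << 2)

/* Hypothetical allow-mask; the real MLX5_IB_DM_*_ALLOWED_ACCESS
 * definitions are not part of this listing. */
#define DM_MEMIC_ALLOWED (ACCESS_LOCAL_WRITE | ACCESS_REMOTE_WRITE | \
                          ACCESS_REMOTE_READ)

static int check_dm_access(int access_flags)
{
    if (access_flags & ~DM_MEMIC_ALLOWED)
        return -EINVAL; /* a bit outside the allow-mask was requested */
    return 0;
}

int main(void)
{
    printf("%d\n", check_dm_access(ACCESS_REMOTE_READ));          /* 0 */
    printf("%d\n", check_dm_access(ACCESS_REMOTE_READ | 1 << 5)); /* -22 */
    return 0;
}
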
1416 u64 iova, int access_flags) in create_real_mr() argument
1425 mr = alloc_cacheable_mr(pd, umem, iova, access_flags, in create_real_mr()
1432 mr = reg_create(pd, umem, iova, access_flags, page_size, in create_real_mr()
1461 u64 iova, int access_flags, in create_user_odp_mr() argument
1481 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags); in create_user_odp_mr()
1491 odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags, in create_user_odp_mr()
1496 mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags, in create_user_odp_mr()
1520 u64 iova, int access_flags, in mlx5_ib_reg_user_mr() argument
1531 start, iova, length, access_flags); in mlx5_ib_reg_user_mr()
1537 if (access_flags & IB_ACCESS_ON_DEMAND) in mlx5_ib_reg_user_mr()
1538 return create_user_odp_mr(pd, start, length, iova, access_flags, in mlx5_ib_reg_user_mr()
1540 umem = ib_umem_get(&dev->ib_dev, start, length, access_flags); in mlx5_ib_reg_user_mr()
1543 return create_real_mr(pd, umem, iova, access_flags); in mlx5_ib_reg_user_mr()
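
Lines 1520-1543 show the top-level registration dispatch: IB_ACCESS_ON_DEMAND routes to the ODP path (lines 1537-1538), everything else pins the pages with ib_umem_get() and registers a regular MR (lines 1540-1543). A minimal model of that branch, with printf stubs in place of the real helpers:

#include <stdio.h>

#define IB_ACCESS_ON_DEMAND (1 << 6) /* value as in rdma/ib_verbs.h */

static void create_user_odp_mr(int access_flags)
{
    printf("ODP path: page-fault backed MR (flags=0x%x)\n", access_flags);
}

static void create_real_mr(int access_flags)
{
    printf("pinned path: ib_umem_get + regular MR (flags=0x%x)\n",
           access_flags);
}

static void reg_user_mr(int access_flags)
{
    if (access_flags & IB_ACCESS_ON_DEMAND) /* line 1537 */
        create_user_odp_mr(access_flags);
    else                                    /* lines 1540-1543 */
        create_real_mr(access_flags);
}

int main(void)
{
    reg_user_mr(IB_ACCESS_ON_DEMAND);
    reg_user_mr(0);
    return 0;
}
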
1568 int fd, int access_flags, int access_mode) in reg_user_mr_dmabuf() argument
1583 access_flags, in reg_user_mr_dmabuf()
1588 fd, access_flags); in reg_user_mr_dmabuf()
1597 access_flags, access_mode); in reg_user_mr_dmabuf()
1628 int fd, int access_flags) in reg_user_mr_dmabuf_by_data_direct() argument
1637 if (!PAGE_ALIGNED(virt_addr) || (access_flags & IB_ACCESS_ON_DEMAND)) in reg_user_mr_dmabuf_by_data_direct()
1651 access_flags &= ~IB_ACCESS_RELAXED_ORDERING; in reg_user_mr_dmabuf_by_data_direct()
1654 access_flags, MLX5_MKC_ACCESS_MODE_KSM); in reg_user_mr_dmabuf_by_data_direct()
1661 crossing_mr = reg_create_crossing_vhca_mr(pd, virt_addr, length, access_flags, in reg_user_mr_dmabuf_by_data_direct()
1680 int fd, int access_flags, in mlx5_ib_reg_user_mr_dmabuf() argument
1701 offset, virt_addr, length, fd, access_flags, mlx5_access_flags); in mlx5_ib_reg_user_mr_dmabuf()
1709 fd, access_flags); in mlx5_ib_reg_user_mr_dmabuf()
1713 fd, access_flags, MLX5_MKC_ACCESS_MODE_MTT); in mlx5_ib_reg_user_mr_dmabuf()
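
Lines 1628-1713 cover the dmabuf variants. The data-direct path appears to reject unaligned or ODP requests (line 1637), strip IB_ACCESS_RELAXED_ORDERING (line 1651), and register through a KSM-mode mkey plus a crossing-VHCA MR (lines 1654-1661), while the plain path uses MTT mode (line 1713). A sketch of just the precondition-and-mask step; the error code and page size are assumptions consistent with common kernel practice, not taken from this listing.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL /* assumed */
#define IB_ACCESS_ON_DEMAND        (1 << 6)
#define IB_ACCESS_RELAXED_ORDERING (1 << 20)

static int data_direct_prepare(uint64_t virt_addr, int *access_flags)
{
    /* line 1637: data-direct requires page alignment and no ODP */
    if ((virt_addr & (PAGE_SIZE - 1)) ||
        (*access_flags & IB_ACCESS_ON_DEMAND))
        return -EOPNOTSUPP; /* error code is an assumption */

    /* line 1651: relaxed ordering is not carried into the KSM mkey */
    *access_flags &= ~IB_ACCESS_RELAXED_ORDERING;
    return 0;
}

int main(void)
{
    int flags = IB_ACCESS_RELAXED_ORDERING;

    if (!data_direct_prepare(0x10000, &flags))
        printf("flags after prepare: 0x%x\n", flags); /* 0x0 */
    return 0;
}
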
1755 int access_flags, int flags, struct ib_umem *new_umem, in umr_rereg_pas() argument
1777 mr->access_flags = access_flags; in umr_rereg_pas()
1822 new_access_flags = mr->access_flags; in mlx5_ib_rereg_user_mr()
1830 if (can_use_umr_rereg_access(dev, mr->access_flags, in mlx5_ib_rereg_user_mr()
1865 can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) { in mlx5_ib_rereg_user_mr()
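
Lines 1755-1865 show rereg: when the old and new access_flags differ only in bits UMR can rewrite (can_use_umr_rereg_access(), lines 1830 and 1865), the driver patches the existing mkey in place and umr_rereg_pas() updates mr->access_flags (line 1777) instead of recreating the MR. A hedged model of that predicate; the UNCHANGEABLE mask below is a stand-in for the driver's real capability-dependent set described above.

#include <stdbool.h>
#include <stdio.h>

#define IB_ACCESS_REMOTE_ATOMIC    (1 << 3)
#define IB_ACCESS_RELAXED_ORDERING (1 << 20)

/* Assume this device cannot patch these bits via UMR (see the
 * get_unchangeable_access_flags() references earlier in the listing). */
#define UNCHANGEABLE (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_RELAXED_ORDERING)

static bool can_use_umr_rereg_access(int old_flags, int new_flags)
{
    /* Only bits UMR can rewrite may differ between old and new;
     * any change to an unchangeable bit forces full re-registration. */
    return ((old_flags ^ new_flags) & UNCHANGEABLE) == 0;
}

int main(void)
{
    printf("%d\n", can_use_umr_rereg_access(0, 1 << 1));       /* 1: UMR ok */
    printf("%d\n", can_use_umr_rereg_access(0, UNCHANGEABLE)); /* 0: recreate */
    return 0;
}
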