Lines Matching +full:cm +full:-poll +full:-mode

1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
18 #include <linux/dma-mapping.h>
204 if (attr->gid_type == IB_GID_TYPE_IB) in rdma_gid_attr_network_type()
207 if (attr->gid_type == IB_GID_TYPE_ROCE) in rdma_gid_attr_network_type()
210 if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid)) in rdma_gid_attr_network_type()
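These three checks derive an enum rdma_network_type from a GID attribute. A minimal sketch of a consumer branching on the result (the use_* helpers are hypothetical placeholders):

static int build_hdr_for_gid(const struct ib_gid_attr *attr)
{
        switch (rdma_gid_attr_network_type(attr)) {
        case RDMA_NETWORK_IB:   /* IB_GID_TYPE_IB: GRH addressing */
                return use_grh(attr);
        case RDMA_NETWORK_IPV4: /* RoCE v2 with a v4-mapped GID */
                return use_ipv4_udp(attr);
        case RDMA_NETWORK_IPV6: /* RoCE v2, plain IPv6 GID */
                return use_ipv6_udp(attr);
        default:                /* e.g. RDMA_NETWORK_ROCE_V1 */
                return -EINVAL;
        }
}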
246 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
281 * This device supports a per-device lkey or stag that can be
288 /* IB_QP_CREATE_INTEGRITY_EN is supported to implement T10-PI */
476 default: return -1; in ib_mtu_enum_to_int()
551 default: return -1; in ib_width_enum_to_int()
573 * @name - The name of the counter
574 * @flags - Flags of the counter; For example, IB_STAT_FLAG_OPTIONAL
575 * @priv - Driver private information; Core code should not use
585 * @lock - Mutex to protect parallel write access to lifespan and values
588 * @timestamp - Used by the core code to track when the last update was
589 * @lifespan - Used by the core code to determine how old the counters
593 * @descs - Array of pointers to static descriptors used for the counters
595 * @is_disabled - A bitmap to indicate each counter is currently disabled
597 * @num_counters - How many hardware counters there are. If name is
601 * @value - Array of u64 counters that are accessed by the sysfs code and
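Lines 573-601 document struct rdma_hw_stats and its static descriptors. A sketch of how a driver might back them, with made-up counter names; rdma_alloc_hw_stats_struct() and RDMA_HW_STATS_DEFAULT_LIFESPAN are the real core helpers:

static const struct rdma_stat_desc my_port_descs[] = {
        [0] = { .name = "rx_pkts" },
        [1] = { .name = "tx_pkts" },
        [2] = { .name = "rare_event", .flags = IB_STAT_FLAG_OPTIONAL },
};

static struct rdma_hw_stats *my_alloc_hw_port_stats(struct ib_device *ibdev,
                                                    u32 port_num)
{
        /* lifespan: how long cached values stay fresh, in msecs */
        return rdma_alloc_hw_stats_struct(my_port_descs,
                                          ARRAY_SIZE(my_port_descs),
                                          RDMA_HW_STATS_DEFAULT_LIFESPAN);
}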
774 (_ptr)->device = _device; \
775 (_ptr)->handler = _handler; \
776 INIT_LIST_HEAD(&(_ptr)->list); \
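The three assignments above are the body of INIT_IB_EVENT_HANDLER. Pairing it with registration might look like this sketch (my_event_cb and my_client_add are hypothetical):

static void my_event_cb(struct ib_event_handler *handler,
                        struct ib_event *event)
{
        pr_info("async event %d on %s\n", event->event,
                dev_name(&event->device->dev));
}

static struct ib_event_handler my_handler;

static void my_client_add(struct ib_device *device)
{
        INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_cb);
        ib_register_event_handler(&my_handler);
        /* teardown: ib_unregister_event_handler(&my_handler); */
}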
848 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
856 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
864 * enum ib_mr_type - memory region type
869 * the normal mr constraints - see
873 * @IB_MR_TYPE_USER: memory region that is used for the user-space
894 * struct ib_mr_status - Memory region status container
907 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
1114 * indices into a 2-entry table.
1162 /* reserve bits 26-31 for low level drivers' internal use */
1365 /* reserve bits 26-31 for low level drivers' internal use */
1476 ((IB_ACCESS_FLUSH_PERSISTENT << 1) - 1) | IB_ACCESS_OPTIONAL,
1480 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1487 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1500 /* Driver is being hot-unplugged. This call should delete the actual object itself */
1502 /* uobj is being cleaned-up before being committed */
1530 /* FIXME, save memory: ufile->context == context */
1585 IB_POLL_SOFTIRQ, /* poll from softirq context */
1586 IB_POLL_WORKQUEUE, /* poll from workqueue */
1587 IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
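The poll context is chosen at CQ allocation time. A sketch, assuming a valid ib_device and a caller-private cookie:

static struct ib_cq *setup_cq(struct ib_device *dev, void *priv)
{
        /* completions for this CQ are reaped from softirq context;
         * pair with ib_free_cq() on teardown */
        return ib_alloc_cq(dev, priv, 128 /* nr_cqe */,
                           0 /* comp_vector */, IB_POLL_SOFTIRQ);
}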
1867 /* default unicast and multicast rule -
1871 /* default multicast rule -
1875 /* sniffer rule - receive all port traffic */
2121 /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
2122 * This is done in order to share the same flags between user-space and
2209 /* rdma netdev type - specifies protocol type */
2216 * struct rdma_netdev - rdma netdev
2285 !__same_type(((struct drv_struct *)NULL)->member, \
2289 ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
2293 ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
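The ops.size_##ib_type values consumed here are declared by drivers via INIT_RDMA_OBJ_SIZE(); a sketch with a hypothetical mydrv_pd wrapper:

struct mydrv_pd {
        struct ib_pd ibpd;      /* core object must be the first member */
        u32 pdn;
};

static const struct ib_device_ops mydrv_dev_ops = {
        /* records sizeof(struct mydrv_pd) in ops.size_ib_pd and
         * build-checks that ibpd sits at offset 0 */
        INIT_RDMA_OBJ_SIZE(ib_pd, mydrv_pd, ibpd),
};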
2313 return (u64)entry->start_pgoff << PAGE_SHIFT; in rdma_user_mmap_get_offset()
2317 * struct ib_device_ops - InfiniBand device operations
2389 * must return -EOPNOTSUPP if it doesn't support the specified type.
2401 * link layer is either IB or iWarp. It is a no-op if @port_num port
2408 * of device of port at gid index available at @attr. Meta-info of
2558 * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and
2560 * the sysfs core when the device is removed. A lifespan of -1 in the
2567 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2568 * @index - The index in the value array we wish to have updated, or
2570 * Return codes -
2571 * < 0 - Error, no counters updated
2572 * index - Updated the single counter pointed to by index
2573 * num_counters - Updated all counters (will reset the timestamp
2582 * modify_hw_stat - Modify the counter configuration
2584 * Return codes - 0 on success or error code otherwise.
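A driver hook honoring these return codes could be sketched as follows (mydrv_read_counter is hypothetical):

static int mydrv_get_hw_stats(struct ib_device *ibdev,
                              struct rdma_hw_stats *stats,
                              u32 port_num, int index)
{
        int i;

        if (index < stats->num_counters) {
                stats->value[index] = mydrv_read_counter(ibdev, index);
                return index;           /* one counter updated */
        }

        for (i = 0; i < stats->num_counters; i++)
                stats->value[i] = mydrv_read_counter(ibdev, i);
        return stats->num_counters;     /* all counters updated */
}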
2612 /* iWarp CM callbacks */
2625 * counter_bind_qp - Bind a QP to a counter.
2626 * @counter - The counter to be bound. If counter->id is zero then
2627 * the driver needs to allocate a new counter and set counter->id
2631 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
2636 * counter_dealloc - De-allocate the hw counter
2640 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2646 * counter_update_stats - Query the stats value of this counter
2667 * add_sub_dev - Add a sub IB device
2674 * del_sub_dev - Delete a sub IB device
2784 /* Used by iWarp CM */
2789 /* A parent device has a list of sub-devices */
2804 if (is_numa_aware && dev->ops.get_numa_node) in rdma_zalloc_obj()
2805 return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev)); in rdma_zalloc_obj()
2854 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
2895 * rdma_block_iter_dma_address - get the aligned dma address of the current
2902 return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1); in rdma_block_iter_dma_address()
2906 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
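rdma_block_iter_dma_address() is normally used inside rdma_for_each_block(); a sketch that walks a mapped sg_table in 2MiB blocks (mydrv_write_pte and struct mydrv_dev are hypothetical):

static void mydrv_program_mtt(struct mydrv_dev *dev, struct sg_table *sgt)
{
        struct ib_block_iter biter;

        /* visit each 2MiB-aligned DMA block of the mapped SGL */
        rdma_for_each_block(sgt->sgl, &biter, sgt->nents, SZ_2M)
                mydrv_write_pte(dev, rdma_block_iter_dma_address(&biter));
}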
2921 * ib_get_client_data - Get IB client context
2933 return xa_load(&device->client_data, client->client_id); in ib_get_client_data()
2972 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; in ib_copy_from_udata()
2977 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; in ib_copy_to_udata()
3002 return ib_is_buffer_cleared(udata->inbuf + offset, len); in ib_is_udata_cleared()
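Both copy helpers bound the transfer by the user-supplied lengths; a sketch of the usual command/response round trip (the mydrv_* uAPI structs and the cqn value are hypothetical):

static int mydrv_handle_udata(struct ib_udata *udata)
{
        struct mydrv_create_cq_cmd cmd = {};
        struct mydrv_create_cq_resp resp = {};
        int ret;

        ret = ib_copy_from_udata(&cmd, udata,
                                 min(sizeof(cmd), udata->inlen));
        if (ret)
                return ret;             /* -EFAULT on a bad user pointer */

        resp.cqn = 42;                  /* driver-specific reply */
        return ib_copy_to_udata(udata, &resp,
                                min(sizeof(resp), udata->outlen));
}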
3006 * ib_modify_qp_is_ok - Check that the supplied attribute mask
3014 * This function is a helper function that a low-level driver's
3034 * rdma_cap_ib_switch - Check if the device is IB switch
3044 return device->is_switch; in rdma_cap_ib_switch()
3048 * rdma_start_port - Return the first valid port number for the device
3061 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
3062 * @device - The struct ib_device * to iterate over
3063 * @iter - The unsigned int to store the port number
3072 * rdma_end_port - Return the last valid port number for the device
3081 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt; in rdma_end_port()
3094 return device->port_data[port_num].immutable.core_cap_flags & in rdma_is_grh_required()
3101 return device->port_data[port_num].immutable.core_cap_flags & in rdma_protocol_ib()
3108 return device->port_data[port_num].immutable.core_cap_flags & in rdma_protocol_roce()
3115 return device->port_data[port_num].immutable.core_cap_flags & in rdma_protocol_roce_udp_encap()
3122 return device->port_data[port_num].immutable.core_cap_flags & in rdma_protocol_roce_eth_encap()
3129 return device->port_data[port_num].immutable.core_cap_flags & in rdma_protocol_iwarp()
3143 return device->port_data[port_num].immutable.core_cap_flags & in rdma_protocol_raw_packet()
3150 return device->port_data[port_num].immutable.core_cap_flags & in rdma_protocol_usnic()
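Combined with rdma_for_each_port() from lines 3061-3081, these per-port protocol tests support a device scan; a minimal sketch:

static void scan_ports(struct ib_device *device)
{
        unsigned int i;

        rdma_for_each_port(device, i) {
                if (rdma_protocol_ib(device, i))
                        pr_info("port %u: native InfiniBand\n", i);
                else if (rdma_protocol_iwarp(device, i))
                        pr_info("port %u: iWARP\n", i);
        }
}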
3155 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
3168 return device->port_data[port_num].immutable.core_cap_flags & in rdma_cap_ib_mad()
3173 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
3193 return device->port_data[port_num].immutable.core_cap_flags & in rdma_cap_opa_mad()
3198 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
3219 return device->port_data[port_num].immutable.core_cap_flags & in rdma_cap_ib_smi()
3224 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
3229 * The InfiniBand Communication Manager is one of many pre-defined General
3235 * Return: true if the port supports an IB CM (this does not guarantee that
3236 * a CM is actually running however).
3240 return device->port_data[port_num].immutable.core_cap_flags & in rdma_cap_ib_cm()
3245 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
3253 * Return: true if the port supports an iWARP CM (this does not guarantee that
3254 * a CM is actually running however).
3258 return device->port_data[port_num].immutable.core_cap_flags & in rdma_cap_iw_cm()
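A connection-oriented ULP typically keys off these two capabilities to pick a CM; a sketch (the MY_CM_* constants are hypothetical):

enum { MY_CM_IB, MY_CM_IW };

static int pick_cm(struct ib_device *device, u32 port_num)
{
        if (rdma_cap_ib_cm(device, port_num))
                return MY_CM_IB;        /* IB/RoCE-style CM */
        if (rdma_cap_iw_cm(device, port_num))
                return MY_CM_IW;        /* iWARP CM */
        return -EOPNOTSUPP;             /* no CM service on this port */
}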
3263 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
3268 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3279 return device->port_data[port_num].immutable.core_cap_flags & in rdma_cap_ib_sa()
3284 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
3307 * rdma_cap_af_ib - Check if the port of device has the capability
3321 return device->port_data[port_num].immutable.core_cap_flags & in rdma_cap_af_ib()
3326 * rdma_cap_eth_ah - Check if the port of device has the capability
3343 return device->port_data[port_num].immutable.core_cap_flags & in rdma_cap_eth_ah()
3348 * rdma_cap_opa_ah - Check if the port of device supports
3358 return (device->port_data[port_num].immutable.core_cap_flags & in rdma_cap_opa_ah()
3363 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3377 return device->port_data[port_num].immutable.max_mad_size; in rdma_max_mad_size()
3381 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3397 device->ops.add_gid && device->ops.del_gid; in rdma_cap_roce_gid_table()
3413 * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
3422 return (device->port_data[port_num].immutable.core_cap_flags & in rdma_core_cap_opa_port()
3427 * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
3433 * -1 if the enum value of mtu is not supported.
3445 * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
3456 return attr->phys_mtu; in rdma_mtu_from_attr()
3458 return ib_mtu_enum_to_int(attr->max_mtu); in rdma_mtu_from_attr()
3493 * the rkey for it into pd->unsafe_global_rkey. This can be used by
3507 * ib_alloc_pd - Allocates an unused protection domain.
3523 * ib_dealloc_pd - Deallocate kernel PD
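A sketch of the kernel PD lifecycle around these two calls:

static int pd_roundtrip(struct ib_device *device)
{
        struct ib_pd *pd;

        pd = ib_alloc_pd(device, 0);    /* no IB_PD_UNSAFE_GLOBAL_RKEY */
        if (IS_ERR(pd))
                return PTR_ERR(pd);

        /* ... create QPs/CQs/MRs under this PD ... */

        ib_dealloc_pd(pd);              /* all PD users must be gone */
        return 0;
}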
3541 * rdma_create_ah - Creates an address handle for the given address vector.
3553 * rdma_create_user_ah - Creates an address handle for the given address vector.
3568 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
3580 * ib_get_rdma_header_version - Get the header version
3586 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3608 * ib_create_ah_from_wc - Creates an address handle associated with the
3623 * rdma_modify_ah - Modifies the address vector associated with an address
3632 * rdma_query_ah - Queries the address vector associated with an address
3646 * rdma_destroy_ah_user - Destroys an address handle.
3654 * rdma_destroy_ah - Destroys a kernel address handle.
3674 if (!pd->device->ops.create_srq) in ib_create_srq()
3675 return ERR_PTR(-EOPNOTSUPP); in ib_create_srq()
3681 * ib_modify_srq - Modifies the attributes for the specified SRQ.
3685 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3697 * ib_query_srq - Returns the attribute list and current values for the
3706 * ib_destroy_srq_user - Destroys the specified SRQ.
3713 * ib_destroy_srq - Destroys the specified kernel SRQ.
3726 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3738 return srq->device->ops.post_srq_recv(srq, recv_wr, in ib_post_srq_recv()
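A sketch of creating the SRQ that ib_post_srq_recv() above feeds, with placeholder limits:

static struct ib_srq *setup_srq(struct ib_pd *pd)
{
        struct ib_srq_init_attr init_attr = {
                .srq_type = IB_SRQT_BASIC,
                .attr = {
                        .max_wr  = 256, /* outstanding receive WRs */
                        .max_sge = 1,
                },
        };

        /* returns ERR_PTR(-EOPNOTSUPP) if the driver lacks create_srq */
        return ib_create_srq(pd, &init_attr);
}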
3746 * ib_create_qp - Creates a kernel QP associated with the specific protection
3760 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3764 * @attr_mask: A bit-mask used to specify which attributes of the QP
3776 * ib_modify_qp - Modifies the attributes for the specified QP and then
3781 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3789 * ib_query_qp - Returns the attribute list and current values for the
3793 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3805 * ib_destroy_qp_user - Destroys the specified QP.
3812 * ib_destroy_qp - Destroys the specified kernel QP.
3823 * ib_open_qp - Obtain a reference to an existing sharable QP.
3824 * @xrcd - XRC domain
3833 * ib_close_qp - Release an external reference to a QP.
3842 * ib_post_send - Posts a list of work requests to the send queue of
3860 return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy); in ib_post_send()
3864 * ib_post_recv - Posts a list of work requests to the receive queue of
3877 return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy); in ib_post_recv()
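A sketch of posting one signaled send through the hook shown at line 3860 (SGE setup omitted):

static int post_one_send(struct ib_qp *qp, struct ib_sge *sge)
{
        struct ib_send_wr wr = {
                .sg_list    = sge,
                .num_sge    = 1,
                .opcode     = IB_WR_SEND,
                .send_flags = IB_SEND_SIGNALED, /* request a CQE */
        };

        /* passing NULL lets ib_post_send() substitute its dummy bad_wr */
        return ib_post_send(qp, &wr, NULL);
}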
3914 * ib_create_cq - Creates a CQ on the specified device.
3916 * @comp_handler: A user-specified callback that is invoked when a
3918 * @event_handler: A user-specified callback that is invoked when an
3936 * ib_resize_cq - Modifies the capacity of the CQ.
3945 * rdma_set_cq_moderation - Modifies moderation params of the CQ
3954 * ib_destroy_cq_user - Destroys the specified CQ.
3961 * ib_destroy_cq - Destroys the specified kernel CQ.
3974 * ib_poll_cq - poll a CQ for completion(s)
3980 * Poll a CQ for (possibly multiple) completions. If the return value
3983 * non-negative and < num_entries, then the CQ was emptied.
3988 return cq->device->ops.poll_cq(cq, num_entries, wc); in ib_poll_cq()
3992 * ib_req_notify_cq - Request completion notification on a CQ.
4007 * to the CQ since the last CQ poll will trigger a completion
4010 * in. It means that the consumer must poll the CQ again to
4015 * to the CQ since the last poll without triggering a
4021 return cq->device->ops.req_notify_cq(cq, flags); in ib_req_notify_cq()
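Lines 3974-4021 describe the standard drain-then-rearm idiom; a sketch (handle_wc is hypothetical):

static void drain_cq(struct ib_cq *cq)
{
        struct ib_wc wc;

again:
        while (ib_poll_cq(cq, 1, &wc) > 0)
                handle_wc(&wc);

        /* a positive return means CQEs arrived since the last poll,
         * so go around again instead of sleeping */
        if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                 IB_CQ_REPORT_MISSED_EVENTS) > 0)
                goto again;
}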
4037 return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device; in ib_uses_virt_dma()
4048 return dma_pci_p2pdma_supported(dev->dma_device); in ib_dma_pci_p2p_dma_supported()
4052 * ib_virt_dma_to_ptr - Convert a dma_addr to a kernel pointer
4060 /* virt_dma mode maps kernel virtual addresses directly into the dma addr */ in ib_virt_dma_to_ptr()
4065 * ib_virt_dma_to_page - Convert a dma_addr to a struct page
4077 * ib_dma_mapping_error - check a DMA addr for error
4085 return dma_mapping_error(dev->dma_device, dma_addr); in ib_dma_mapping_error()
4089 * ib_dma_map_single - Map a kernel virtual address to DMA address
4101 return dma_map_single(dev->dma_device, cpu_addr, size, direction); in ib_dma_map_single()
4105 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
4116 dma_unmap_single(dev->dma_device, addr, size, direction); in ib_dma_unmap_single()
4120 * ib_dma_map_page - Map a physical page to DMA address
4135 return dma_map_page(dev->dma_device, page, offset, size, direction); in ib_dma_map_page()
4139 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
4150 dma_unmap_page(dev->dma_device, addr, size, direction); in ib_dma_unmap_page()
4161 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, in ib_dma_map_sg_attrs()
4171 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, in ib_dma_unmap_sg_attrs()
4176 * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses
4190 nents = ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents); in ib_dma_map_sgtable_attrs()
4192 return -EIO; in ib_dma_map_sgtable_attrs()
4193 sgt->nents = nents; in ib_dma_map_sgtable_attrs()
4196 return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs); in ib_dma_map_sgtable_attrs()
4205 dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs); in ib_dma_unmap_sgtable_attrs()
4209 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
4223 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
4237 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
4246 return dma_get_max_seg_size(dev->dma_device); in ib_dma_max_seg_size()
4250 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
4262 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); in ib_dma_sync_single_for_cpu()
4266 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
4278 dma_sync_single_for_device(dev->dma_device, addr, size, dir); in ib_dma_sync_single_for_device()
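A sketch of a full single-buffer cycle through the DMA wrappers above:

static int dma_one_buf(struct ib_device *dev, void *buf, size_t len)
{
        u64 addr;

        addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(dev, addr))
                return -ENOMEM;

        /* CPU is done writing buf; hand ownership to the device */
        ib_dma_sync_single_for_device(dev, addr, len, DMA_TO_DEVICE);

        /* ... post a WR referencing addr, wait for its completion ... */

        ib_dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
        return 0;
}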
4281 /* ib_reg_user_mr - register a memory region for virtual addresses from kernel
4287 /* ib_advise_mr - give advice about an address range in a memory region */
4291 * ib_dereg_mr_user - Deregisters a memory region and removes it from the
4301 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
4322 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
4324 * @mr - struct ib_mr pointer to be updated.
4325 * @newkey - new key to be used.
4329 mr->lkey = (mr->lkey & 0xffffff00) | newkey; in ib_update_fast_reg_key()
4330 mr->rkey = (mr->rkey & 0xffffff00) | newkey; in ib_update_fast_reg_key()
4334 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
4336 * @rkey - the rkey to increment.
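The two key helpers compose into the usual re-key step before reusing a fast-registration MR; a sketch:

static void rekey_mr(struct ib_mr *mr)
{
        /* bump the low 8 key bits so stale remote references fault
         * instead of silently hitting re-registered memory */
        ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
}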
4345 * ib_attach_mcast - Attaches the specified QP to a multicast group.
4359 * ib_detach_mcast - Detaches the specified QP from a multicast group.
4373 u64 device_cap = ib_dev->attrs.device_cap_flags; in ib_check_mr_access()
4381 return -EINVAL; in ib_check_mr_access()
4384 return -EINVAL; in ib_check_mr_access()
4387 !(ib_dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING)) in ib_check_mr_access()
4388 return -EOPNOTSUPP; in ib_check_mr_access()
4394 return -EOPNOTSUPP; in ib_check_mr_access()
4438 * be valid should use get_device(&ibdev->dev) to hold the memory.
4443 return refcount_inc_not_zero(&dev->refcount); in ib_device_try_get()
4476 mr->iova = 0; in ib_map_mr_sg_zbva()
4493 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE) in rdma_ah_retrieve_dmac()
4494 return attr->roce.dmac; in rdma_ah_retrieve_dmac()
4500 if (attr->type == RDMA_AH_ATTR_TYPE_IB) in rdma_ah_set_dlid()
4501 attr->ib.dlid = (u16)dlid; in rdma_ah_set_dlid()
4502 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) in rdma_ah_set_dlid()
4503 attr->opa.dlid = dlid; in rdma_ah_set_dlid()
4508 if (attr->type == RDMA_AH_ATTR_TYPE_IB) in rdma_ah_get_dlid()
4509 return attr->ib.dlid; in rdma_ah_get_dlid()
4510 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) in rdma_ah_get_dlid()
4511 return attr->opa.dlid; in rdma_ah_get_dlid()
4517 attr->sl = sl; in rdma_ah_set_sl()
4522 return attr->sl; in rdma_ah_get_sl()
4528 if (attr->type == RDMA_AH_ATTR_TYPE_IB) in rdma_ah_set_path_bits()
4529 attr->ib.src_path_bits = src_path_bits; in rdma_ah_set_path_bits()
4530 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) in rdma_ah_set_path_bits()
4531 attr->opa.src_path_bits = src_path_bits; in rdma_ah_set_path_bits()
4536 if (attr->type == RDMA_AH_ATTR_TYPE_IB) in rdma_ah_get_path_bits()
4537 return attr->ib.src_path_bits; in rdma_ah_get_path_bits()
4538 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) in rdma_ah_get_path_bits()
4539 return attr->opa.src_path_bits; in rdma_ah_get_path_bits()
4546 if (attr->type == RDMA_AH_ATTR_TYPE_OPA) in rdma_ah_set_make_grd()
4547 attr->opa.make_grd = make_grd; in rdma_ah_set_make_grd()
4552 if (attr->type == RDMA_AH_ATTR_TYPE_OPA) in rdma_ah_get_make_grd()
4553 return attr->opa.make_grd; in rdma_ah_get_make_grd()
4559 attr->port_num = port_num; in rdma_ah_set_port_num()
4564 return attr->port_num; in rdma_ah_get_port_num()
4570 attr->static_rate = static_rate; in rdma_ah_set_static_rate()
4575 return attr->static_rate; in rdma_ah_get_static_rate()
4581 attr->ah_flags = flag; in rdma_ah_set_ah_flags()
4587 return attr->ah_flags; in rdma_ah_get_ah_flags()
4593 return &attr->grh; in rdma_ah_read_grh()
4600 return &attr->grh; in rdma_ah_retrieve_grh()
4607 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid)); in rdma_ah_set_dgid_raw()
4615 grh->dgid.global.subnet_prefix = prefix; in rdma_ah_set_subnet_prefix()
4623 grh->dgid.global.interface_id = if_id; in rdma_ah_set_interface_id()
4633 attr->ah_flags = IB_AH_GRH; in rdma_ah_set_grh()
4635 grh->dgid = *dgid; in rdma_ah_set_grh()
4636 grh->flow_label = flow_label; in rdma_ah_set_grh()
4637 grh->sgid_index = sgid_index; in rdma_ah_set_grh()
4638 grh->hop_limit = hop_limit; in rdma_ah_set_grh()
4639 grh->traffic_class = traffic_class; in rdma_ah_set_grh()
4640 grh->sgid_attr = NULL; in rdma_ah_set_grh()
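These setters compose when filling an address vector by hand; a sketch with placeholder values:

static void fill_ah_attr(struct rdma_ah_attr *attr, u32 port_num,
                         union ib_gid *dgid)
{
        rdma_ah_set_sl(attr, 0);
        rdma_ah_set_port_num(attr, port_num);
        rdma_ah_set_static_rate(attr, 0);       /* no rate limit */
        /* sets IB_AH_GRH and fills every GRH field in one call */
        rdma_ah_set_grh(attr, dgid, 0 /* flow_label */, 0 /* sgid_index */,
                        64 /* hop_limit */, 0 /* traffic_class */);
}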
4654 * rdma_ah_find_type - Return address handle type.
4669 if (dev->type == RDMA_DEVICE_TYPE_SMI) in rdma_ah_find_type()
4676 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4691 * ib_lid_be16 - Return lid in 16bit BE encoding.
4702 * ib_get_vector_affinity - Get the affinity mappings of a given completion
4708 * completion vector (returns all-cpus map if the device driver doesn't
4714 if (comp_vector < 0 || comp_vector >= device->num_comp_vectors || in ib_get_vector_affinity()
4715 !device->ops.get_vector_affinity) in ib_get_vector_affinity()
4718 return device->ops.get_vector_affinity(device, comp_vector); in ib_get_vector_affinity()
4723 * rdma_roce_rescan_device - Rescan all of the network devices in the system
4746 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
4758 return coredev->owner; in rdma_device_to_ibdev()
4762 * ibdev_to_node - return the NUMA node for a given ib_device
4767 struct device *parent = ibdev->dev.parent; in ibdev_to_node()
4775 * rdma_device_to_drv_device - Helper macro to reach back to driver's
4780 * ops->device_group.
4793 * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
4809 * rdma_calc_flow_label - generate a RDMA symmetric flow label value based on
4834 * rdma_get_udp_sport - Calculate and set UDP source port based on the flow
4853 /** ib_add_sub_device - Add a sub IB device on an existing one
4867 /** ib_del_sub_device_and_put - Delete an IB sub device while holding a 'get'
4877 ibdev->name_assign_type = RDMA_NAME_ASSIGN_TYPE_USER; in ib_mark_name_assigned_by_user()