Lines Matching +full:protection +full:- +full:domain
20 * - Redistributions of source code must retain the above
24 * - Redistributions in binary form must reproduce the above
97 [IB_WC_LOC_PROT_ERR] = "local protection error",
151 default: return -1; in ib_rate_to_mult()
211 default: return -1; in ib_rate_to_mbps()
237 if (device->ops.get_link_layer) in rdma_port_get_link_layer()
238 return device->ops.get_link_layer(device, port_num); in rdma_port_get_link_layer()
240 lt = rdma_node_get_transport(device->node_type); in rdma_port_get_link_layer()
248 /* Protection domains */
251 * __ib_alloc_pd - Allocates an unused protection domain.
252 * @device: The device on which to allocate the protection domain.
253 * @flags: protection domain flags
254 * @caller: caller's build-time module name
256 * A protection domain object provides an association between QPs, shared
271 return ERR_PTR(-ENOMEM); in __ib_alloc_pd()
273 pd->device = device; in __ib_alloc_pd()
274 pd->flags = flags; in __ib_alloc_pd()
276 rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD); in __ib_alloc_pd()
277 rdma_restrack_set_name(&pd->res, caller); in __ib_alloc_pd()
279 ret = device->ops.alloc_pd(pd, NULL); in __ib_alloc_pd()
281 rdma_restrack_put(&pd->res); in __ib_alloc_pd()
285 rdma_restrack_add(&pd->res); in __ib_alloc_pd()
287 if (device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY) in __ib_alloc_pd()
288 pd->local_dma_lkey = device->local_dma_lkey; in __ib_alloc_pd()
300 mr = pd->device->ops.get_dma_mr(pd, mr_access_flags); in __ib_alloc_pd()
306 mr->device = pd->device; in __ib_alloc_pd()
307 mr->pd = pd; in __ib_alloc_pd()
308 mr->type = IB_MR_TYPE_DMA; in __ib_alloc_pd()
309 mr->uobject = NULL; in __ib_alloc_pd()
310 mr->need_inval = false; in __ib_alloc_pd()
312 pd->__internal_mr = mr; in __ib_alloc_pd()
314 if (!(device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY)) in __ib_alloc_pd()
315 pd->local_dma_lkey = pd->__internal_mr->lkey; in __ib_alloc_pd()
318 pd->unsafe_global_rkey = pd->__internal_mr->rkey; in __ib_alloc_pd()
326 * ib_dealloc_pd_user - Deallocates a protection domain.
327 * @pd: The protection domain to deallocate.
338 if (pd->__internal_mr) { in ib_dealloc_pd_user()
339 ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL); in ib_dealloc_pd_user()
341 pd->__internal_mr = NULL; in ib_dealloc_pd_user()
344 ret = pd->device->ops.dealloc_pd(pd, udata); in ib_dealloc_pd_user()
348 rdma_restrack_del(&pd->res); in ib_dealloc_pd_user()
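The lines above come from __ib_alloc_pd() and ib_dealloc_pd_user() in the RDMA core's verbs.c. Kernel ULPs normally reach them through the ib_alloc_pd() macro (which supplies KBUILD_MODNAME as the caller) and the ib_dealloc_pd() wrapper. A minimal usage sketch, with ibdev as a placeholder for an already-registered struct ib_device:

#include <rdma/ib_verbs.h>

static struct ib_pd *example_pd_setup(struct ib_device *ibdev)
{
	struct ib_pd *pd;

	pd = ib_alloc_pd(ibdev, 0);	/* expands to __ib_alloc_pd(ibdev, 0, KBUILD_MODNAME) */
	if (IS_ERR(pd))
		return pd;

	/* pd->local_dma_lkey can now be used in local SGEs */
	return pd;
}

static void example_pd_teardown(struct ib_pd *pd)
{
	/* all QPs, AHs, SRQs and MRs on this PD must be gone first */
	ib_dealloc_pd(pd);
}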
357 * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
366 if (dest->grh.sgid_attr) in rdma_copy_ah_attr()
367 rdma_hold_gid_attr(dest->grh.sgid_attr); in rdma_copy_ah_attr()
372 * rdma_replace_ah_attr - Replace valid ah_attr with new one.
386 if (old->grh.sgid_attr) in rdma_replace_ah_attr()
387 rdma_hold_gid_attr(old->grh.sgid_attr); in rdma_replace_ah_attr()
392 * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
406 src->grh.sgid_attr = NULL; in rdma_move_ah_attr()
417 if (!rdma_is_port_valid(device, ah_attr->port_num)) in rdma_check_ah_attr()
418 return -EINVAL; in rdma_check_ah_attr()
420 if ((rdma_is_grh_required(device, ah_attr->port_num) || in rdma_check_ah_attr()
421 ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) && in rdma_check_ah_attr()
422 !(ah_attr->ah_flags & IB_AH_GRH)) in rdma_check_ah_attr()
423 return -EINVAL; in rdma_check_ah_attr()
425 if (ah_attr->grh.sgid_attr) { in rdma_check_ah_attr()
430 if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index || in rdma_check_ah_attr()
431 ah_attr->grh.sgid_attr->port_num != ah_attr->port_num) in rdma_check_ah_attr()
432 return -EINVAL; in rdma_check_ah_attr()
449 *old_sgid_attr = ah_attr->grh.sgid_attr; in rdma_fill_sgid_attr()
455 if (!(ah_attr->ah_flags & IB_AH_GRH)) in rdma_fill_sgid_attr()
459 if (grh->sgid_attr) in rdma_fill_sgid_attr()
463 rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index); in rdma_fill_sgid_attr()
468 grh->sgid_attr = sgid_attr; in rdma_fill_sgid_attr()
479 if (ah_attr->grh.sgid_attr == old_sgid_attr) in rdma_unfill_sgid_attr()
496 if (ah_attr->ah_flags & IB_AH_GRH) { in rdma_update_sgid_attr()
497 rdma_hold_gid_attr(ah_attr->grh.sgid_attr); in rdma_update_sgid_attr()
498 return ah_attr->grh.sgid_attr; in rdma_update_sgid_attr()
510 struct ib_device *device = pd->device; in _rdma_create_ah()
516 if (!udata && !device->ops.create_ah) in _rdma_create_ah()
517 return ERR_PTR(-EOPNOTSUPP); in _rdma_create_ah()
523 return ERR_PTR(-ENOMEM); in _rdma_create_ah()
525 ah->device = device; in _rdma_create_ah()
526 ah->pd = pd; in _rdma_create_ah()
527 ah->type = ah_attr->type; in _rdma_create_ah()
528 ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL); in _rdma_create_ah()
534 ret = device->ops.create_user_ah(ah, &init_attr, udata); in _rdma_create_ah()
536 ret = device->ops.create_ah(ah, &init_attr, NULL); in _rdma_create_ah()
538 if (ah->sgid_attr) in _rdma_create_ah()
539 rdma_put_gid_attr(ah->sgid_attr); in _rdma_create_ah()
544 atomic_inc(&pd->usecnt); in _rdma_create_ah()
549 * rdma_create_ah - Creates an address handle for the
551 * @pd: The protection domain associated with the address handle.
567 ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr); in rdma_create_ah()
570 slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr, in rdma_create_ah()
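rdma_create_ah() above pins the SGID attribute and may select a RoCE LAG slave before calling _rdma_create_ah(). A hedged sketch of a kernel caller building a plain IB address handle with the rdma_ah_attr helpers; the dlid, sl and port values are placeholders:

static struct ib_ah *example_create_ib_ah(struct ib_pd *pd, u32 port_num,
					  u16 dlid, u8 sl)
{
	struct rdma_ah_attr ah_attr = {};

	ah_attr.type = RDMA_AH_ATTR_TYPE_IB;
	rdma_ah_set_port_num(&ah_attr, port_num);
	rdma_ah_set_dlid(&ah_attr, dlid);
	rdma_ah_set_sl(&ah_attr, sl);

	/* RDMA_CREATE_AH_SLEEPABLE: caller context may sleep */
	return rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
}

The matching teardown is rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE), which drops the PD reference taken in _rdma_create_ah().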
585 * rdma_create_user_ah - Creates an address handle for the
588 * @pd: The protection domain associated with the address handle.
605 err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr); in rdma_create_user_ah()
609 if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) { in rdma_create_user_ah()
610 err = ib_resolve_eth_dmac(pd->device, ah_attr); in rdma_create_user_ah()
628 const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh; in ib_get_rdma_header_version()
630 const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh; in ib_get_rdma_header_version()
635 if (ip6h->version != 6) in ib_get_rdma_header_version()
636 return (ip4h->version == 4) ? 4 : 0; in ib_get_rdma_header_version()
642 if (ip4h->ihl != 5) in ib_get_rdma_header_version()
653 if (ip4h->check == ip4h_checked.check) in ib_get_rdma_header_version()
673 if (grh->next_hdr == IPPROTO_UDP) in ib_get_net_type_by_grh()
692 if (ctx->gid_type != gid_attr->gid_type) in find_gid_index()
699 return ctx->vlan_id == vlan_id; in find_gid_index()
723 return -EINVAL; in ib_get_gids_from_rdma_hdr()
727 &hdr->roce4grh.saddr, 4); in ib_get_gids_from_rdma_hdr()
729 &hdr->roce4grh.daddr, 4); in ib_get_gids_from_rdma_hdr()
739 *dgid = hdr->ibgrh.dgid; in ib_get_gids_from_rdma_hdr()
740 *sgid = hdr->ibgrh.sgid; in ib_get_gids_from_rdma_hdr()
743 return -EINVAL; in ib_get_gids_from_rdma_hdr()
756 const struct ib_gid_attr *sgid_attr = grh->sgid_attr; in ib_resolve_unicast_gid_dmac()
763 if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) && in ib_resolve_unicast_gid_dmac()
764 sgid_attr->gid_type == IB_GID_TYPE_ROCE) { in ib_resolve_unicast_gid_dmac()
765 rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw, in ib_resolve_unicast_gid_dmac()
766 ah_attr->roce.dmac); in ib_resolve_unicast_gid_dmac()
770 ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid, in ib_resolve_unicast_gid_dmac()
771 ah_attr->roce.dmac, in ib_resolve_unicast_gid_dmac()
774 grh->hop_limit = hop_limit; in ib_resolve_unicast_gid_dmac()
806 ah_attr->type = rdma_ah_find_type(device, port_num); in ib_init_ah_attr_from_wc()
808 if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE) in ib_init_ah_attr_from_wc()
809 net_type = wc->network_hdr_type; in ib_init_ah_attr_from_wc()
819 rdma_ah_set_sl(ah_attr, wc->sl); in ib_init_ah_attr_from_wc()
823 u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ? in ib_init_ah_attr_from_wc()
824 wc->vlan_id : 0xffff; in ib_init_ah_attr_from_wc()
826 if (!(wc->wc_flags & IB_WC_GRH)) in ib_init_ah_attr_from_wc()
827 return -EPROTOTYPE; in ib_init_ah_attr_from_wc()
835 flow_class = be32_to_cpu(grh->version_tclass_flow); in ib_init_ah_attr_from_wc()
849 rdma_ah_set_dlid(ah_attr, wc->slid); in ib_init_ah_attr_from_wc()
850 rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits); in ib_init_ah_attr_from_wc()
852 if ((wc->wc_flags & IB_WC_GRH) == 0) in ib_init_ah_attr_from_wc()
864 flow_class = be32_to_cpu(grh->version_tclass_flow); in ib_init_ah_attr_from_wc()
878 * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
896 rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit, in rdma_move_grh_sgid_attr()
898 attr->grh.sgid_attr = sgid_attr; in rdma_move_grh_sgid_attr()
903 * rdma_destroy_ah_attr - Release reference to SGID attribute of
913 if (ah_attr->grh.sgid_attr) { in rdma_destroy_ah_attr()
914 rdma_put_gid_attr(ah_attr->grh.sgid_attr); in rdma_destroy_ah_attr()
915 ah_attr->grh.sgid_attr = NULL; in rdma_destroy_ah_attr()
927 ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr); in ib_create_ah_from_wc()
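ib_create_ah_from_wc() wraps the ib_init_ah_attr_from_wc() call shown above and is the usual way for a UD responder to address a reply to the original sender. Sketch, where recv_grh is a placeholder for the GRH buffer that preceded the received UD payload:

static struct ib_ah *example_reply_ah(struct ib_pd *pd, const struct ib_wc *wc,
				      const struct ib_grh *recv_grh, u32 port_num)
{
	/* the GRH is only examined when wc->wc_flags has IB_WC_GRH set */
	return ib_create_ah_from_wc(pd, wc, recv_grh, port_num);
}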
943 if (ah->type != ah_attr->type) in rdma_modify_ah()
944 return -EINVAL; in rdma_modify_ah()
946 ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr); in rdma_modify_ah()
950 ret = ah->device->ops.modify_ah ? in rdma_modify_ah()
951 ah->device->ops.modify_ah(ah, ah_attr) : in rdma_modify_ah()
952 -EOPNOTSUPP; in rdma_modify_ah()
954 ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr); in rdma_modify_ah()
962 ah_attr->grh.sgid_attr = NULL; in rdma_query_ah()
964 return ah->device->ops.query_ah ? in rdma_query_ah()
965 ah->device->ops.query_ah(ah, ah_attr) : in rdma_query_ah()
966 -EOPNOTSUPP; in rdma_query_ah()
972 const struct ib_gid_attr *sgid_attr = ah->sgid_attr; in rdma_destroy_ah_user()
978 pd = ah->pd; in rdma_destroy_ah_user()
980 ret = ah->device->ops.destroy_ah(ah, flags); in rdma_destroy_ah_user()
984 atomic_dec(&pd->usecnt); in rdma_destroy_ah_user()
996 * ib_create_srq_user - Creates an SRQ associated with the specified protection
997 * domain.
998 * @pd: The protection domain associated with the SRQ.
1005 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1018 srq = rdma_zalloc_drv_obj(pd->device, ib_srq); in ib_create_srq_user()
1020 return ERR_PTR(-ENOMEM); in ib_create_srq_user()
1022 srq->device = pd->device; in ib_create_srq_user()
1023 srq->pd = pd; in ib_create_srq_user()
1024 srq->event_handler = srq_init_attr->event_handler; in ib_create_srq_user()
1025 srq->srq_context = srq_init_attr->srq_context; in ib_create_srq_user()
1026 srq->srq_type = srq_init_attr->srq_type; in ib_create_srq_user()
1027 srq->uobject = uobject; in ib_create_srq_user()
1029 if (ib_srq_has_cq(srq->srq_type)) { in ib_create_srq_user()
1030 srq->ext.cq = srq_init_attr->ext.cq; in ib_create_srq_user()
1031 atomic_inc(&srq->ext.cq->usecnt); in ib_create_srq_user()
1033 if (srq->srq_type == IB_SRQT_XRC) { in ib_create_srq_user()
1034 srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd; in ib_create_srq_user()
1035 if (srq->ext.xrc.xrcd) in ib_create_srq_user()
1036 atomic_inc(&srq->ext.xrc.xrcd->usecnt); in ib_create_srq_user()
1038 atomic_inc(&pd->usecnt); in ib_create_srq_user()
1040 rdma_restrack_new(&srq->res, RDMA_RESTRACK_SRQ); in ib_create_srq_user()
1041 rdma_restrack_parent_name(&srq->res, &pd->res); in ib_create_srq_user()
1043 ret = pd->device->ops.create_srq(srq, srq_init_attr, udata); in ib_create_srq_user()
1045 rdma_restrack_put(&srq->res); in ib_create_srq_user()
1046 atomic_dec(&pd->usecnt); in ib_create_srq_user()
1047 if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd) in ib_create_srq_user()
1048 atomic_dec(&srq->ext.xrc.xrcd->usecnt); in ib_create_srq_user()
1049 if (ib_srq_has_cq(srq->srq_type)) in ib_create_srq_user()
1050 atomic_dec(&srq->ext.cq->usecnt); in ib_create_srq_user()
1055 rdma_restrack_add(&srq->res); in ib_create_srq_user()
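ib_create_srq_user() above takes references on the PD and, depending on srq_type, on the CQ or XRC domain, and undoes them if the driver's create_srq op fails. Kernel callers use the ib_create_srq() wrapper; a minimal sketch for a basic (non-XRC) SRQ with placeholder sizes:

static struct ib_srq *example_create_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr init = {
		.srq_type = IB_SRQT_BASIC,
		.attr = {
			.max_wr	 = 256,	/* placeholder depth */
			.max_sge = 1,
		},
	};

	/* init.attr is updated with the sizes actually allocated */
	return ib_create_srq(pd, &init);
}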
1065 return srq->device->ops.modify_srq ? in ib_modify_srq()
1066 srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask, in ib_modify_srq()
1067 NULL) : -EOPNOTSUPP; in ib_modify_srq()
1074 return srq->device->ops.query_srq ? in ib_query_srq()
1075 srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP; in ib_query_srq()
1083 if (atomic_read(&srq->usecnt)) in ib_destroy_srq_user()
1084 return -EBUSY; in ib_destroy_srq_user()
1086 ret = srq->device->ops.destroy_srq(srq, udata); in ib_destroy_srq_user()
1090 atomic_dec(&srq->pd->usecnt); in ib_destroy_srq_user()
1091 if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd) in ib_destroy_srq_user()
1092 atomic_dec(&srq->ext.xrc.xrcd->usecnt); in ib_destroy_srq_user()
1093 if (ib_srq_has_cq(srq->srq_type)) in ib_destroy_srq_user()
1094 atomic_dec(&srq->ext.cq->usecnt); in ib_destroy_srq_user()
1095 rdma_restrack_del(&srq->res); in ib_destroy_srq_user()
1106 struct ib_qp *qp = event->element.qp; in __ib_qp_event_handler()
1108 if (event->event == IB_EVENT_QP_LAST_WQE_REACHED) in __ib_qp_event_handler()
1109 complete(&qp->srq_completion); in __ib_qp_event_handler()
1110 if (qp->registered_event_handler) in __ib_qp_event_handler()
1111 qp->registered_event_handler(event, qp->qp_context); in __ib_qp_event_handler()
1119 spin_lock_irqsave(&qp->device->qp_open_list_lock, flags); in __ib_shared_qp_event_handler()
1120 list_for_each_entry(event->element.qp, &qp->open_list, open_list) in __ib_shared_qp_event_handler()
1121 if (event->element.qp->event_handler) in __ib_shared_qp_event_handler()
1122 event->element.qp->event_handler(event, event->element.qp->qp_context); in __ib_shared_qp_event_handler()
1123 spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags); in __ib_shared_qp_event_handler()
1136 return ERR_PTR(-ENOMEM); in __ib_open_qp()
1138 qp->real_qp = real_qp; in __ib_open_qp()
1139 err = ib_open_shared_qp_security(qp, real_qp->device); in __ib_open_qp()
1145 qp->real_qp = real_qp; in __ib_open_qp()
1146 atomic_inc(&real_qp->usecnt); in __ib_open_qp()
1147 qp->device = real_qp->device; in __ib_open_qp()
1148 qp->event_handler = event_handler; in __ib_open_qp()
1149 qp->qp_context = qp_context; in __ib_open_qp()
1150 qp->qp_num = real_qp->qp_num; in __ib_open_qp()
1151 qp->qp_type = real_qp->qp_type; in __ib_open_qp()
1153 spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags); in __ib_open_qp()
1154 list_add(&qp->open_list, &real_qp->open_list); in __ib_open_qp()
1155 spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags); in __ib_open_qp()
1165 if (qp_open_attr->qp_type != IB_QPT_XRC_TGT) in ib_open_qp()
1166 return ERR_PTR(-EINVAL); in ib_open_qp()
1168 down_read(&xrcd->tgt_qps_rwsem); in ib_open_qp()
1169 real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num); in ib_open_qp()
1171 up_read(&xrcd->tgt_qps_rwsem); in ib_open_qp()
1172 return ERR_PTR(-EINVAL); in ib_open_qp()
1174 qp = __ib_open_qp(real_qp, qp_open_attr->event_handler, in ib_open_qp()
1175 qp_open_attr->qp_context); in ib_open_qp()
1176 up_read(&xrcd->tgt_qps_rwsem); in ib_open_qp()
1187 qp->event_handler = __ib_shared_qp_event_handler; in create_xrc_qp_user()
1188 qp->qp_context = qp; in create_xrc_qp_user()
1189 qp->pd = NULL; in create_xrc_qp_user()
1190 qp->send_cq = qp->recv_cq = NULL; in create_xrc_qp_user()
1191 qp->srq = NULL; in create_xrc_qp_user()
1192 qp->xrcd = qp_init_attr->xrcd; in create_xrc_qp_user()
1193 atomic_inc(&qp_init_attr->xrcd->usecnt); in create_xrc_qp_user()
1194 INIT_LIST_HEAD(&qp->open_list); in create_xrc_qp_user()
1196 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler, in create_xrc_qp_user()
1197 qp_init_attr->qp_context); in create_xrc_qp_user()
1201 err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num, in create_xrc_qp_user()
1219 if (!dev->ops.create_qp) in create_qp()
1220 return ERR_PTR(-EOPNOTSUPP); in create_qp()
1224 return ERR_PTR(-ENOMEM); in create_qp()
1226 qp->device = dev; in create_qp()
1227 qp->pd = pd; in create_qp()
1228 qp->uobject = uobj; in create_qp()
1229 qp->real_qp = qp; in create_qp()
1231 qp->qp_type = attr->qp_type; in create_qp()
1232 qp->rwq_ind_tbl = attr->rwq_ind_tbl; in create_qp()
1233 qp->srq = attr->srq; in create_qp()
1234 qp->event_handler = __ib_qp_event_handler; in create_qp()
1235 qp->registered_event_handler = attr->event_handler; in create_qp()
1236 qp->port = attr->port_num; in create_qp()
1237 qp->qp_context = attr->qp_context; in create_qp()
1239 spin_lock_init(&qp->mr_lock); in create_qp()
1240 INIT_LIST_HEAD(&qp->rdma_mrs); in create_qp()
1241 INIT_LIST_HEAD(&qp->sig_mrs); in create_qp()
1242 init_completion(&qp->srq_completion); in create_qp()
1244 qp->send_cq = attr->send_cq; in create_qp()
1245 qp->recv_cq = attr->recv_cq; in create_qp()
1247 rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP); in create_qp()
1249 rdma_restrack_set_name(&qp->res, udata ? NULL : caller); in create_qp()
1250 ret = dev->ops.create_qp(qp, attr, udata); in create_qp()
1258 qp->send_cq = attr->send_cq; in create_qp()
1259 qp->recv_cq = attr->recv_cq; in create_qp()
1265 rdma_restrack_add(&qp->res); in create_qp()
1269 qp->device->ops.destroy_qp(qp, udata ? &dummy : NULL); in create_qp()
1271 rdma_restrack_put(&qp->res); in create_qp()
1278 * ib_create_qp_user - Creates a QP associated with the specified protection
1279 * domain.
1281 * @pd: The protection domain associated with the QP.
1287 * @caller: caller's build-time module name
1296 if (attr->qp_type == IB_QPT_XRC_TGT) in ib_create_qp_user()
1300 if (attr->qp_type != IB_QPT_XRC_TGT || IS_ERR(qp)) in ib_create_qp_user()
1309 xrc_qp->uobject = uobj; in ib_create_qp_user()
1316 if (qp->pd) in ib_qp_usecnt_inc()
1317 atomic_inc(&qp->pd->usecnt); in ib_qp_usecnt_inc()
1318 if (qp->send_cq) in ib_qp_usecnt_inc()
1319 atomic_inc(&qp->send_cq->usecnt); in ib_qp_usecnt_inc()
1320 if (qp->recv_cq) in ib_qp_usecnt_inc()
1321 atomic_inc(&qp->recv_cq->usecnt); in ib_qp_usecnt_inc()
1322 if (qp->srq) in ib_qp_usecnt_inc()
1323 atomic_inc(&qp->srq->usecnt); in ib_qp_usecnt_inc()
1324 if (qp->rwq_ind_tbl) in ib_qp_usecnt_inc()
1325 atomic_inc(&qp->rwq_ind_tbl->usecnt); in ib_qp_usecnt_inc()
1331 if (qp->rwq_ind_tbl) in ib_qp_usecnt_dec()
1332 atomic_dec(&qp->rwq_ind_tbl->usecnt); in ib_qp_usecnt_dec()
1333 if (qp->srq) in ib_qp_usecnt_dec()
1334 atomic_dec(&qp->srq->usecnt); in ib_qp_usecnt_dec()
1335 if (qp->recv_cq) in ib_qp_usecnt_dec()
1336 atomic_dec(&qp->recv_cq->usecnt); in ib_qp_usecnt_dec()
1337 if (qp->send_cq) in ib_qp_usecnt_dec()
1338 atomic_dec(&qp->send_cq->usecnt); in ib_qp_usecnt_dec()
1339 if (qp->pd) in ib_qp_usecnt_dec()
1340 atomic_dec(&qp->pd->usecnt); in ib_qp_usecnt_dec()
1348 struct ib_device *device = pd->device; in ib_create_qp_kernel()
1358 if (qp_init_attr->cap.max_rdma_ctxs) in ib_create_qp_kernel()
1367 if (qp_init_attr->cap.max_rdma_ctxs) { in ib_create_qp_kernel()
1378 qp->max_write_sge = qp_init_attr->cap.max_send_sge; in ib_create_qp_kernel()
1379 qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge, in ib_create_qp_kernel()
1380 device->attrs.max_sge_rd); in ib_create_qp_kernel()
1381 if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) in ib_create_qp_kernel()
1382 qp->integrity_en = true; in ib_create_qp_kernel()
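ib_create_qp_kernel() above also derives max_write_sge/max_read_sge from the requested caps and device->attrs.max_sge_rd. A hedged sketch of a ULP creating an RC QP through the ib_create_qp() wrapper; pd and cq are assumed to have been set up earlier and the depths are placeholders:

static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr init_attr = {
		.qp_type     = IB_QPT_RC,
		.send_cq     = cq,
		.recv_cq     = cq,
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.cap = {
			.max_send_wr  = 64,
			.max_recv_wr  = 64,
			.max_send_sge = 2,
			.max_recv_sge = 2,
		},
	};

	return ib_create_qp(pd, &init_attr);
}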
1727 * ib_resolve_eth_dmac - Resolve destination mac address
1740 if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) { in ib_resolve_eth_dmac()
1741 if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) { in ib_resolve_eth_dmac()
1744 memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4); in ib_resolve_eth_dmac()
1745 ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac); in ib_resolve_eth_dmac()
1747 ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw, in ib_resolve_eth_dmac()
1748 (char *)ah_attr->roce.dmac); in ib_resolve_eth_dmac()
1758 return (qp->qp_type == IB_QPT_UC || in is_qp_type_connected()
1759 qp->qp_type == IB_QPT_RC || in is_qp_type_connected()
1760 qp->qp_type == IB_QPT_XRC_INI || in is_qp_type_connected()
1761 qp->qp_type == IB_QPT_XRC_TGT); in is_qp_type_connected()
1770 u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in _ib_modify_qp()
1775 attr->xmit_slave = NULL; in _ib_modify_qp()
1777 ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr, in _ib_modify_qp()
1782 if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE && in _ib_modify_qp()
1792 ret = ib_resolve_eth_dmac(qp->device, in _ib_modify_qp()
1793 &attr->ah_attr); in _ib_modify_qp()
1797 slave = rdma_lag_get_ah_roce_slave(qp->device, in _ib_modify_qp()
1798 &attr->ah_attr, in _ib_modify_qp()
1804 attr->xmit_slave = slave; in _ib_modify_qp()
1811 * from primary->alternate we will keep the wrong in _ib_modify_qp()
1815 ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr, in _ib_modify_qp()
1824 if (!(rdma_protocol_ib(qp->device, in _ib_modify_qp()
1825 attr->alt_ah_attr.port_num) && in _ib_modify_qp()
1826 rdma_protocol_ib(qp->device, port))) { in _ib_modify_qp()
1827 ret = -EINVAL; in _ib_modify_qp()
1832 if (rdma_ib_or_roce(qp->device, port)) { in _ib_modify_qp()
1833 if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) { in _ib_modify_qp()
1834 dev_warn(&qp->device->dev, in _ib_modify_qp()
1837 attr->rq_psn &= 0xffffff; in _ib_modify_qp()
1840 if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) { in _ib_modify_qp()
1841 dev_warn(&qp->device->dev, in _ib_modify_qp()
1844 attr->sq_psn &= 0xffffff; in _ib_modify_qp()
1852 if (!qp->counter && (attr_mask & IB_QP_PORT) && in _ib_modify_qp()
1853 ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT)) in _ib_modify_qp()
1854 rdma_counter_bind_qp_auto(qp, attr->port_num); in _ib_modify_qp()
1861 qp->port = attr->port_num; in _ib_modify_qp()
1863 qp->av_sgid_attr = in _ib_modify_qp()
1864 rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr); in _ib_modify_qp()
1866 qp->alt_path_sgid_attr = rdma_update_sgid_attr( in _ib_modify_qp()
1867 &attr->alt_ah_attr, qp->alt_path_sgid_attr); in _ib_modify_qp()
1871 rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av); in _ib_modify_qp()
1874 rdma_lag_put_ah_roce_slave(attr->xmit_slave); in _ib_modify_qp()
1875 rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av); in _ib_modify_qp()
1881 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
1885 * @attr_mask: A bit-mask used to specify which attributes of the QP
1894 return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata); in ib_modify_qp_with_udata()
1989 return -EINVAL; in ib_get_eth_speed()
1993 return -ENODEV; in ib_get_eth_speed()
2007 netdev->name, netdev_speed); in ib_get_eth_speed()
2021 return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); in ib_modify_qp()
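_ib_modify_qp() above is the common path under ib_modify_qp() and ib_modify_qp_with_udata(): it pins SGID attributes, resolves the RoCE DMAC for connected QPs, masks rq_psn/sq_psn to 24 bits and caches the new AV SGID attributes. A sketch of the usual RESET-to-INIT transition a consumer performs before RTR/RTS (access flags and pkey index are placeholders):

static int example_qp_to_init(struct ib_qp *qp, u32 port_num)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = port_num,
		.qp_access_flags = IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}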
2030 qp_attr->ah_attr.grh.sgid_attr = NULL; in ib_query_qp()
2031 qp_attr->alt_ah_attr.grh.sgid_attr = NULL; in ib_query_qp()
2033 return qp->device->ops.query_qp ? in ib_query_qp()
2034 qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask, in ib_query_qp()
2035 qp_init_attr) : -EOPNOTSUPP; in ib_query_qp()
2044 real_qp = qp->real_qp; in ib_close_qp()
2046 return -EINVAL; in ib_close_qp()
2048 spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags); in ib_close_qp()
2049 list_del(&qp->open_list); in ib_close_qp()
2050 spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags); in ib_close_qp()
2052 atomic_dec(&real_qp->usecnt); in ib_close_qp()
2053 if (qp->qp_sec) in ib_close_qp()
2054 ib_close_shared_qp_security(qp->qp_sec); in ib_close_qp()
2067 real_qp = qp->real_qp; in __ib_destroy_shared_qp()
2068 xrcd = real_qp->xrcd; in __ib_destroy_shared_qp()
2069 down_write(&xrcd->tgt_qps_rwsem); in __ib_destroy_shared_qp()
2071 if (atomic_read(&real_qp->usecnt) == 0) in __ib_destroy_shared_qp()
2072 xa_erase(&xrcd->tgt_qps, real_qp->qp_num); in __ib_destroy_shared_qp()
2075 up_write(&xrcd->tgt_qps_rwsem); in __ib_destroy_shared_qp()
2080 atomic_dec(&xrcd->usecnt); in __ib_destroy_shared_qp()
2088 const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr; in ib_destroy_qp_user()
2089 const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr; in ib_destroy_qp_user()
2093 WARN_ON_ONCE(qp->mrs_used > 0); in ib_destroy_qp_user()
2095 if (atomic_read(&qp->usecnt)) in ib_destroy_qp_user()
2096 return -EBUSY; in ib_destroy_qp_user()
2098 if (qp->real_qp != qp) in ib_destroy_qp_user()
2101 sec = qp->qp_sec; in ib_destroy_qp_user()
2105 if (!qp->uobject) in ib_destroy_qp_user()
2109 ret = qp->device->ops.destroy_qp(qp, udata); in ib_destroy_qp_user()
2125 rdma_restrack_del(&qp->res); in ib_destroy_qp_user()
2145 return ERR_PTR(-ENOMEM); in __ib_create_cq()
2147 cq->device = device; in __ib_create_cq()
2148 cq->uobject = NULL; in __ib_create_cq()
2149 cq->comp_handler = comp_handler; in __ib_create_cq()
2150 cq->event_handler = event_handler; in __ib_create_cq()
2151 cq->cq_context = cq_context; in __ib_create_cq()
2152 atomic_set(&cq->usecnt, 0); in __ib_create_cq()
2154 rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ); in __ib_create_cq()
2155 rdma_restrack_set_name(&cq->res, caller); in __ib_create_cq()
2157 ret = device->ops.create_cq(cq, cq_attr, NULL); in __ib_create_cq()
2159 rdma_restrack_put(&cq->res); in __ib_create_cq()
2164 rdma_restrack_add(&cq->res); in __ib_create_cq()
2171 if (cq->shared) in rdma_set_cq_moderation()
2172 return -EOPNOTSUPP; in rdma_set_cq_moderation()
2174 return cq->device->ops.modify_cq ? in rdma_set_cq_moderation()
2175 cq->device->ops.modify_cq(cq, cq_count, in rdma_set_cq_moderation()
2176 cq_period) : -EOPNOTSUPP; in rdma_set_cq_moderation()
2184 if (WARN_ON_ONCE(cq->shared)) in ib_destroy_cq_user()
2185 return -EOPNOTSUPP; in ib_destroy_cq_user()
2187 if (atomic_read(&cq->usecnt)) in ib_destroy_cq_user()
2188 return -EBUSY; in ib_destroy_cq_user()
2190 ret = cq->device->ops.destroy_cq(cq, udata); in ib_destroy_cq_user()
2194 rdma_restrack_del(&cq->res); in ib_destroy_cq_user()
2202 if (cq->shared) in ib_resize_cq()
2203 return -EOPNOTSUPP; in ib_resize_cq()
2205 return cq->device->ops.resize_cq ? in ib_resize_cq()
2206 cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP; in ib_resize_cq()
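__ib_create_cq() above is the raw constructor; most kernel consumers instead allocate completion queues with ib_alloc_cq(), which also selects a completion handling mode. A minimal sketch with placeholder parameters:

static struct ib_cq *example_alloc_cq(struct ib_device *ibdev, int nr_cqe)
{
	/* private context NULL, completion vector 0, softirq polling */
	return ib_alloc_cq(ibdev, NULL, nr_cqe, 0, IB_POLL_SOFTIRQ);
}

static void example_free_cq(struct ib_cq *cq)
{
	ib_free_cq(cq);
}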
2218 if (!(pd->device->attrs.kernel_cap_flags & in ib_reg_user_mr()
2221 return ERR_PTR(-EINVAL); in ib_reg_user_mr()
2225 mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr, in ib_reg_user_mr()
2231 mr->device = pd->device; in ib_reg_user_mr()
2232 mr->type = IB_MR_TYPE_USER; in ib_reg_user_mr()
2233 mr->pd = pd; in ib_reg_user_mr()
2234 mr->dm = NULL; in ib_reg_user_mr()
2235 atomic_inc(&pd->usecnt); in ib_reg_user_mr()
2236 mr->iova = virt_addr; in ib_reg_user_mr()
2237 mr->length = length; in ib_reg_user_mr()
2239 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); in ib_reg_user_mr()
2240 rdma_restrack_parent_name(&mr->res, &pd->res); in ib_reg_user_mr()
2241 rdma_restrack_add(&mr->res); in ib_reg_user_mr()
2250 if (!pd->device->ops.advise_mr) in ib_advise_mr()
2251 return -EOPNOTSUPP; in ib_advise_mr()
2256 return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge, in ib_advise_mr()
2263 struct ib_pd *pd = mr->pd; in ib_dereg_mr_user()
2264 struct ib_dm *dm = mr->dm; in ib_dereg_mr_user()
2265 struct ib_sig_attrs *sig_attrs = mr->sig_attrs; in ib_dereg_mr_user()
2269 rdma_restrack_del(&mr->res); in ib_dereg_mr_user()
2270 ret = mr->device->ops.dereg_mr(mr, udata); in ib_dereg_mr_user()
2272 atomic_dec(&pd->usecnt); in ib_dereg_mr_user()
2274 atomic_dec(&dm->usecnt); in ib_dereg_mr_user()
2283 * ib_alloc_mr() - Allocates a memory region
2284 * @pd: protection domain associated with the region
2299 if (!pd->device->ops.alloc_mr) { in ib_alloc_mr()
2300 mr = ERR_PTR(-EOPNOTSUPP); in ib_alloc_mr()
2306 mr = ERR_PTR(-EINVAL); in ib_alloc_mr()
2310 mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg); in ib_alloc_mr()
2314 mr->device = pd->device; in ib_alloc_mr()
2315 mr->pd = pd; in ib_alloc_mr()
2316 mr->dm = NULL; in ib_alloc_mr()
2317 mr->uobject = NULL; in ib_alloc_mr()
2318 atomic_inc(&pd->usecnt); in ib_alloc_mr()
2319 mr->need_inval = false; in ib_alloc_mr()
2320 mr->type = mr_type; in ib_alloc_mr()
2321 mr->sig_attrs = NULL; in ib_alloc_mr()
2323 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); in ib_alloc_mr()
2324 rdma_restrack_parent_name(&mr->res, &pd->res); in ib_alloc_mr()
2325 rdma_restrack_add(&mr->res); in ib_alloc_mr()
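ib_alloc_mr() above hands back a fast-registration MR tied to the PD and tracked through restrack; ib_dereg_mr() releases it. Sketch for an ordinary memory-registration MR, with max_sg as a placeholder:

static struct ib_mr *example_alloc_frmr(struct ib_pd *pd, u32 max_sg)
{
	return ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_sg);
}

static void example_free_frmr(struct ib_mr *mr)
{
	/* returns 0 or a driver error; ignored in this sketch */
	ib_dereg_mr(mr);
}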
2333 * ib_alloc_mr_integrity() - Allocates an integrity memory region
2334 * @pd: protection domain associated with the region
2351 if (!pd->device->ops.alloc_mr_integrity || in ib_alloc_mr_integrity()
2352 !pd->device->ops.map_mr_sg_pi) { in ib_alloc_mr_integrity()
2353 mr = ERR_PTR(-EOPNOTSUPP); in ib_alloc_mr_integrity()
2358 mr = ERR_PTR(-EINVAL); in ib_alloc_mr_integrity()
2364 mr = ERR_PTR(-ENOMEM); in ib_alloc_mr_integrity()
2368 mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg, in ib_alloc_mr_integrity()
2375 mr->device = pd->device; in ib_alloc_mr_integrity()
2376 mr->pd = pd; in ib_alloc_mr_integrity()
2377 mr->dm = NULL; in ib_alloc_mr_integrity()
2378 mr->uobject = NULL; in ib_alloc_mr_integrity()
2379 atomic_inc(&pd->usecnt); in ib_alloc_mr_integrity()
2380 mr->need_inval = false; in ib_alloc_mr_integrity()
2381 mr->type = IB_MR_TYPE_INTEGRITY; in ib_alloc_mr_integrity()
2382 mr->sig_attrs = sig_attrs; in ib_alloc_mr_integrity()
2384 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); in ib_alloc_mr_integrity()
2385 rdma_restrack_parent_name(&mr->res, &pd->res); in ib_alloc_mr_integrity()
2386 rdma_restrack_add(&mr->res); in ib_alloc_mr_integrity()
2407 if (rdma_port_get_link_layer(qp->device, attr.port_num) != in is_valid_mcast_lid()
2415 rdma_for_each_port(qp->device, port) in is_valid_mcast_lid()
2416 if (rdma_port_get_link_layer(qp->device, port) != in is_valid_mcast_lid()
2437 if (!qp->device->ops.attach_mcast) in ib_attach_mcast()
2438 return -EOPNOTSUPP; in ib_attach_mcast()
2440 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || in ib_attach_mcast()
2441 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) in ib_attach_mcast()
2442 return -EINVAL; in ib_attach_mcast()
2444 ret = qp->device->ops.attach_mcast(qp, gid, lid); in ib_attach_mcast()
2446 atomic_inc(&qp->usecnt); in ib_attach_mcast()
2455 if (!qp->device->ops.detach_mcast) in ib_detach_mcast()
2456 return -EOPNOTSUPP; in ib_detach_mcast()
2458 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || in ib_detach_mcast()
2459 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) in ib_detach_mcast()
2460 return -EINVAL; in ib_detach_mcast()
2462 ret = qp->device->ops.detach_mcast(qp, gid, lid); in ib_detach_mcast()
2464 atomic_dec(&qp->usecnt); in ib_detach_mcast()
2470 * ib_alloc_xrcd_user - Allocates an XRC domain.
2471 * @device: The device on which to allocate the XRC domain.
2481 if (!device->ops.alloc_xrcd) in ib_alloc_xrcd_user()
2482 return ERR_PTR(-EOPNOTSUPP); in ib_alloc_xrcd_user()
2486 return ERR_PTR(-ENOMEM); in ib_alloc_xrcd_user()
2488 xrcd->device = device; in ib_alloc_xrcd_user()
2489 xrcd->inode = inode; in ib_alloc_xrcd_user()
2490 atomic_set(&xrcd->usecnt, 0); in ib_alloc_xrcd_user()
2491 init_rwsem(&xrcd->tgt_qps_rwsem); in ib_alloc_xrcd_user()
2492 xa_init(&xrcd->tgt_qps); in ib_alloc_xrcd_user()
2494 ret = device->ops.alloc_xrcd(xrcd, udata); in ib_alloc_xrcd_user()
2505 * ib_dealloc_xrcd_user - Deallocates an XRC domain.
2506 * @xrcd: The XRC domain to deallocate.
2513 if (atomic_read(&xrcd->usecnt)) in ib_dealloc_xrcd_user()
2514 return -EBUSY; in ib_dealloc_xrcd_user()
2516 WARN_ON(!xa_empty(&xrcd->tgt_qps)); in ib_dealloc_xrcd_user()
2517 ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata); in ib_dealloc_xrcd_user()
2526 * ib_create_wq - Creates a WQ associated with the specified protection
2527 * domain.
2528 * @pd: The protection domain associated with the WQ.
2533 * wq_attr->max_wr and wq_attr->max_sge determine
2544 if (!pd->device->ops.create_wq) in ib_create_wq()
2545 return ERR_PTR(-EOPNOTSUPP); in ib_create_wq()
2547 wq = pd->device->ops.create_wq(pd, wq_attr, NULL); in ib_create_wq()
2549 wq->event_handler = wq_attr->event_handler; in ib_create_wq()
2550 wq->wq_context = wq_attr->wq_context; in ib_create_wq()
2551 wq->wq_type = wq_attr->wq_type; in ib_create_wq()
2552 wq->cq = wq_attr->cq; in ib_create_wq()
2553 wq->device = pd->device; in ib_create_wq()
2554 wq->pd = pd; in ib_create_wq()
2555 wq->uobject = NULL; in ib_create_wq()
2556 atomic_inc(&pd->usecnt); in ib_create_wq()
2557 atomic_inc(&wq_attr->cq->usecnt); in ib_create_wq()
2558 atomic_set(&wq->usecnt, 0); in ib_create_wq()
2565 * ib_destroy_wq_user - Destroys the specified user WQ.
2571 struct ib_cq *cq = wq->cq; in ib_destroy_wq_user()
2572 struct ib_pd *pd = wq->pd; in ib_destroy_wq_user()
2575 if (atomic_read(&wq->usecnt)) in ib_destroy_wq_user()
2576 return -EBUSY; in ib_destroy_wq_user()
2578 ret = wq->device->ops.destroy_wq(wq, udata); in ib_destroy_wq_user()
2582 atomic_dec(&pd->usecnt); in ib_destroy_wq_user()
2583 atomic_dec(&cq->usecnt); in ib_destroy_wq_user()
2591 if (!mr->device->ops.check_mr_status) in ib_check_mr_status()
2592 return -EOPNOTSUPP; in ib_check_mr_status()
2594 return mr->device->ops.check_mr_status(mr, check_mask, mr_status); in ib_check_mr_status()
2601 if (!device->ops.set_vf_link_state) in ib_set_vf_link_state()
2602 return -EOPNOTSUPP; in ib_set_vf_link_state()
2604 return device->ops.set_vf_link_state(device, vf, port, state); in ib_set_vf_link_state()
2611 if (!device->ops.get_vf_config) in ib_get_vf_config()
2612 return -EOPNOTSUPP; in ib_get_vf_config()
2614 return device->ops.get_vf_config(device, vf, port, info); in ib_get_vf_config()
2621 if (!device->ops.get_vf_stats) in ib_get_vf_stats()
2622 return -EOPNOTSUPP; in ib_get_vf_stats()
2624 return device->ops.get_vf_stats(device, vf, port, stats); in ib_get_vf_stats()
2631 if (!device->ops.set_vf_guid) in ib_set_vf_guid()
2632 return -EOPNOTSUPP; in ib_set_vf_guid()
2634 return device->ops.set_vf_guid(device, vf, port, guid, type); in ib_set_vf_guid()
2642 if (!device->ops.get_vf_guid) in ib_get_vf_guid()
2643 return -EOPNOTSUPP; in ib_get_vf_guid()
2645 return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid); in ib_get_vf_guid()
2649 * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
2661 * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY.
2673 if (unlikely(!mr->device->ops.map_mr_sg_pi || in ib_map_mr_sg_pi()
2674 WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY))) in ib_map_mr_sg_pi()
2675 return -EOPNOTSUPP; in ib_map_mr_sg_pi()
2677 mr->page_size = page_size; in ib_map_mr_sg_pi()
2679 return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents, in ib_map_mr_sg_pi()
2686 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
2696 * - The first sg element is allowed to have an offset.
2697 * - Each sg element must either be aligned to page_size or virtually
2699 * non-contiguous offset, the mapping prefix will not include it.
2700 * - The last sg element is allowed to have length less than page_size.
2701 * - If sg_nents total byte length exceeds the mr max_num_sge * page_size
2703 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
2714 if (unlikely(!mr->device->ops.map_mr_sg)) in ib_map_mr_sg()
2715 return -EOPNOTSUPP; in ib_map_mr_sg()
2717 mr->page_size = page_size; in ib_map_mr_sg()
2719 return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset); in ib_map_mr_sg()
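The constraints listed above are what ib_map_mr_sg() enforces via ib_sg_to_pages(). A typical caller DMA-maps a scatterlist and folds the prefix into the MR before posting an IB_WR_REG_MR work request; PAGE_SIZE is assumed as the registration page size here:

static int example_map_mr(struct ib_device *ibdev, struct ib_mr *mr,
			  struct scatterlist *sg, int sg_nents)
{
	int mapped, n;

	mapped = ib_dma_map_sg(ibdev, sg, sg_nents, DMA_BIDIRECTIONAL);
	if (!mapped)
		return -EIO;

	/* returns the number of sg entries that fit into the MR */
	n = ib_map_mr_sg(mr, sg, mapped, NULL, PAGE_SIZE);
	if (n < mapped) {
		ib_dma_unmap_sg(ibdev, sg, sg_nents, DMA_BIDIRECTIONAL);
		return n < 0 ? n : -EINVAL;
	}
	return 0;
}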
2724 * ib_sg_to_pages() - Convert the largest prefix of a sg list
2752 u64 page_mask = ~((u64)mr->page_size - 1); in ib_sg_to_pages()
2756 return -EINVAL; in ib_sg_to_pages()
2758 mr->iova = sg_dma_address(&sgl[0]) + sg_offset; in ib_sg_to_pages()
2759 mr->length = 0; in ib_sg_to_pages()
2764 unsigned int dma_len = sg_dma_len(sg) - sg_offset; in ib_sg_to_pages()
2770 * end of element i-1 or the start of element i is not aligned in ib_sg_to_pages()
2780 * enough just update mr->length. Otherwise start in ib_sg_to_pages()
2789 sg_offset = prev_addr - sg_dma_address(sg); in ib_sg_to_pages()
2790 mr->length += prev_addr - dma_addr; in ib_sg_to_pages()
2797 page_addr += mr->page_size; in ib_sg_to_pages()
2800 mr->length += dma_len; in ib_sg_to_pages()
2820 struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe, in ib_drain_qp_done()
2823 complete(&cqe->done); in ib_drain_qp_done()
2831 struct ib_cq *cq = qp->send_cq; in __ib_drain_sq()
2858 if (cq->poll_ctx == IB_POLL_DIRECT) in __ib_drain_sq()
2860 ib_process_cq_direct(cq, -1); in __ib_drain_sq()
2870 struct ib_cq *cq = qp->recv_cq; in __ib_drain_rq()
2892 if (cq->poll_ctx == IB_POLL_DIRECT) in __ib_drain_rq()
2894 ib_process_cq_direct(cq, -1); in __ib_drain_rq()
2900 * __ib_drain_srq() - Block until Last WQE Reached event arrives, or timeout
2915 * - Put the QP in the Error State
2916 * - Wait for the Affiliated Asynchronous Last WQE Reached Event;
2917 * - either:
2921 * - or
2924 * - and then invoke a Destroy QP or Reset QP.
2935 if (!qp->srq) { in __ib_drain_srq()
2946 if (ib_srq_has_cq(qp->srq->srq_type)) { in __ib_drain_srq()
2947 cq = qp->srq->ext.cq; in __ib_drain_srq()
2948 } else if (qp->recv_cq) { in __ib_drain_srq()
2949 cq = qp->recv_cq; in __ib_drain_srq()
2955 if (wait_for_completion_timeout(&qp->srq_completion, 60 * HZ) > 0) { in __ib_drain_srq()
2956 while (polled != cq->cqe) { in __ib_drain_srq()
2957 n = ib_process_cq_direct(cq, cq->cqe - polled); in __ib_drain_srq()
2966 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
2970 * If the device has a provider-specific drain function, then
2986 if (qp->device->ops.drain_sq) in ib_drain_sq()
2987 qp->device->ops.drain_sq(qp); in ib_drain_sq()
2990 trace_cq_drain_complete(qp->send_cq); in ib_drain_sq()
2995 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
2999 * If the device has a provider-specific drain function, then
3015 if (qp->device->ops.drain_rq) in ib_drain_rq()
3016 qp->device->ops.drain_rq(qp); in ib_drain_rq()
3019 trace_cq_drain_complete(qp->recv_cq); in ib_drain_rq()
3024 * ib_drain_qp() - Block until all CQEs have been consumed by the
3041 if (!qp->srq) in ib_drain_qp()
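ib_drain_qp() above fans out to the SQ, RQ and SRQ drain helpers; the generic path relies on CQs set up through ib_alloc_cq() unless the driver provides its own drain_sq/drain_rq ops. The usual teardown ordering in a consumer:

static void example_qp_teardown(struct ib_qp *qp)
{
	/* flush outstanding WRs before freeing buffers they reference */
	ib_drain_qp(qp);
	ib_destroy_qp(qp);
}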
3057 if (!device->ops.rdma_netdev_get_params) in rdma_alloc_netdev()
3058 return ERR_PTR(-EOPNOTSUPP); in rdma_alloc_netdev()
3060 rc = device->ops.rdma_netdev_get_params(device, port_num, type, in rdma_alloc_netdev()
3068 return ERR_PTR(-ENOMEM); in rdma_alloc_netdev()
3083 if (!device->ops.rdma_netdev_get_params) in rdma_init_netdev()
3084 return -EOPNOTSUPP; in rdma_init_netdev()
3086 rc = device->ops.rdma_netdev_get_params(device, port_num, type, in rdma_init_netdev()
3101 biter->__sg = sglist; in __rdma_block_iter_start()
3102 biter->__sg_nents = nents; in __rdma_block_iter_start()
3105 biter->__pg_bit = __fls(pgsz); in __rdma_block_iter_start()
3114 if (!biter->__sg_nents || !biter->__sg) in __rdma_block_iter_next()
3117 biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance; in __rdma_block_iter_next()
3118 block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1); in __rdma_block_iter_next()
3119 sg_delta = BIT_ULL(biter->__pg_bit) - block_offset; in __rdma_block_iter_next()
3121 if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) { in __rdma_block_iter_next()
3122 biter->__sg_advance += sg_delta; in __rdma_block_iter_next()
3124 biter->__sg_advance = 0; in __rdma_block_iter_next()
3125 biter->__sg = sg_next(biter->__sg); in __rdma_block_iter_next()
3126 biter->__sg_nents--; in __rdma_block_iter_next()
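__rdma_block_iter_start()/__rdma_block_iter_next() above back the rdma_for_each_block() iterator that drivers use to slice a DMA-mapped scatterlist into device-page-sized blocks. Sketch, where pgsz is assumed to be a page size the device supports:

static void example_walk_blocks(struct scatterlist *sgl, unsigned int nents,
				unsigned long pgsz)
{
	struct ib_block_iter biter;

	rdma_for_each_block(sgl, &biter, nents, pgsz) {
		u64 dma = rdma_block_iter_dma_address(&biter);

		/* program 'dma' into the device page table here */
		(void)dma;
	}
}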
3134 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
3150 stats->is_disabled = kcalloc(BITS_TO_LONGS(num_counters), in rdma_alloc_hw_stats_struct()
3151 sizeof(*stats->is_disabled), GFP_KERNEL); in rdma_alloc_hw_stats_struct()
3152 if (!stats->is_disabled) in rdma_alloc_hw_stats_struct()
3155 stats->descs = descs; in rdma_alloc_hw_stats_struct()
3156 stats->num_counters = num_counters; in rdma_alloc_hw_stats_struct()
3157 stats->lifespan = msecs_to_jiffies(lifespan); in rdma_alloc_hw_stats_struct()
3158 mutex_init(&stats->lock); in rdma_alloc_hw_stats_struct()
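rdma_alloc_hw_stats_struct() above is what a driver returns from its alloc_hw_port_stats / alloc_hw_device_stats op. A hedged sketch with made-up counter names:

static const struct rdma_stat_desc example_descs[] = {
	{ .name = "example_tx_pkts" },	/* hypothetical counters */
	{ .name = "example_rx_pkts" },
};

static struct rdma_hw_stats *example_alloc_port_stats(struct ib_device *ibdev,
						      u32 port_num)
{
	return rdma_alloc_hw_stats_struct(example_descs,
					  ARRAY_SIZE(example_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}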
3169 * rdma_free_hw_stats_struct - Helper function to release rdma_hw_stats
3177 kfree(stats->is_disabled); in rdma_free_hw_stats_struct()