Lines Matching refs: port_priv

Cross-reference listing for port_priv in the kernel MAD layer (drivers/infiniband/core/mad.c): each entry gives the source line number, the matching source line, and the enclosing function; "local" and "argument" mark the lines where the symbol is declared.

95 					struct ib_mad_port_private *port_priv,
107 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
228 struct ib_mad_port_private *port_priv; in ib_register_mad_agent() local
347 port_priv = ib_get_mad_port(device, port_num); in ib_register_mad_agent()
348 if (!port_priv) { in ib_register_mad_agent()
358 if (!port_priv->qp_info[qpn].qp) { in ib_register_mad_agent()
381 mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_agent()
388 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_agent()
424 spin_lock_irq(&port_priv->reg_lock); in ib_register_mad_agent()
428 class = port_priv->version[mad_reg_req-> in ib_register_mad_agent()
442 vendor = port_priv->version[mad_reg_req-> in ib_register_mad_agent()
461 spin_unlock_irq(&port_priv->reg_lock); in ib_register_mad_agent()
466 spin_unlock_irq(&port_priv->reg_lock); in ib_register_mad_agent()
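
The registration path above resolves the port, validates the QP, and then performs all class/vendor table updates inside reg_lock so that two agents can never claim the same management class. A minimal userspace sketch of that check-then-insert discipline; reg_table, reg_entry and NUM_CLASSES are hypothetical stand-ins for the kernel's per-version class and vendor tables:

	#include <errno.h>
	#include <pthread.h>

	#define NUM_CLASSES 256

	struct reg_entry { void *agent; };

	static struct reg_entry *reg_table[NUM_CLASSES];
	static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Returns 0 on success, -EBUSY when the class is already claimed. */
	static int register_agent(unsigned int mgmt_class, struct reg_entry *e)
	{
		int ret = 0;

		pthread_mutex_lock(&reg_lock);
		if (reg_table[mgmt_class])
			ret = -EBUSY;	/* conflicting registration */
		else
			reg_table[mgmt_class] = e;
		pthread_mutex_unlock(&reg_lock);
		return ret;
	}
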
487 struct ib_mad_port_private *port_priv; in unregister_mad_agent() local
497 port_priv = mad_agent_priv->qp_info->port_priv; in unregister_mad_agent()
500 spin_lock_irq(&port_priv->reg_lock); in unregister_mad_agent()
502 spin_unlock_irq(&port_priv->reg_lock); in unregister_mad_agent()
505 flush_workqueue(port_priv->wq); in unregister_mad_agent()
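
unregister_mad_agent() shows the inverse ordering: the entry disappears from the tables under reg_lock first, then flush_workqueue() drains work items that may still hold a pointer to the agent. A sketch of that two-step teardown, with a plain in-flight counter standing in for the workqueue flush; all names are illustrative:

	#include <pthread.h>

	static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t wq_idle = PTHREAD_COND_INITIALIZER;
	static int inflight;		/* workers bump/drop this around each job */
	static void *reg_table[256];

	static void unregister_agent(unsigned int mgmt_class)
	{
		pthread_mutex_lock(&reg_lock);
		reg_table[mgmt_class] = NULL;	/* no new work can find the agent */
		pthread_mutex_unlock(&reg_lock);

		/* Stand-in for flush_workqueue(port_priv->wq). */
		pthread_mutex_lock(&wq_lock);
		while (inflight > 0)
			pthread_cond_wait(&wq_idle, &wq_lock);
		pthread_mutex_unlock(&wq_lock);
	}
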
578 static size_t port_mad_size(const struct ib_mad_port_private *port_priv) in port_mad_size() argument
580 return rdma_max_mad_size(port_priv->device, port_priv->port_num); in port_mad_size()
602 struct ib_mad_port_private *port_priv; in handle_outgoing_dr_smp() local
608 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); in handle_outgoing_dr_smp()
611 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, in handle_outgoing_dr_smp()
612 mad_agent_priv->qp_info->port_priv->port_num); in handle_outgoing_dr_smp()
723 port_priv = ib_get_mad_port(mad_agent_priv->agent.device, in handle_outgoing_dr_smp()
725 if (port_priv) { in handle_outgoing_dr_smp()
727 recv_mad_agent = find_mad_agent(port_priv, in handle_outgoing_dr_smp()
730 if (!port_priv || !recv_mad_agent) { in handle_outgoing_dr_smp()
759 queue_work(mad_agent_priv->qp_info->port_priv->wq, in handle_outgoing_dr_smp()
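
handle_outgoing_dr_smp() is the loopback path: a directed-route SMP addressed to the local port is never posted to hardware; find_mad_agent() locates the receiving agent and the completion is queued on the port workqueue instead. A runnable toy version of that fork, where smp_is_local(), deliver_locally() and post_to_hw() are hypothetical stand-ins:

	#include <stdio.h>

	struct mad_msg { int dest_local; };

	static int smp_is_local(const struct mad_msg *m) { return m->dest_local; }
	static void deliver_locally(struct mad_msg *m) { (void)m; puts("loopback"); }
	static int post_to_hw(struct mad_msg *m) { (void)m; puts("wire"); return 0; }

	static int send_smp(struct mad_msg *m)
	{
		if (smp_is_local(m)) {
			deliver_locally(m);	/* consumed; no send WQE is posted */
			return 0;
		}
		return post_to_hw(m);		/* normal hardware send path */
	}

	int main(void)
	{
		struct mad_msg local = { 1 }, remote = { 0 };

		send_smp(&local);		/* prints "loopback" */
		return send_smp(&remote);	/* prints "wire" */
	}
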
1279 struct ib_mad_port_private *port_priv; in add_nonoui_reg_req() local
1284 port_priv = agent_priv->qp_info->port_priv; in add_nonoui_reg_req()
1285 class = &port_priv->version[mad_reg_req->mgmt_class_version].class; in add_nonoui_reg_req()
1338 struct ib_mad_port_private *port_priv; in add_oui_reg_req() local
1348 port_priv = agent_priv->qp_info->port_priv; in add_oui_reg_req()
1349 vendor_table = &port_priv->version[ in add_oui_reg_req()
1435 struct ib_mad_port_private *port_priv; in remove_mad_reg_req() local
1450 port_priv = agent_priv->qp_info->port_priv; in remove_mad_reg_req()
1452 class = port_priv->version[ in remove_mad_reg_req()
1470 port_priv->version[ in remove_mad_reg_req()
1483 vendor = port_priv->version[ in remove_mad_reg_req()
1515 port_priv->version[ in remove_mad_reg_req()
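
add_nonoui_reg_req(), add_oui_reg_req() and remove_mad_reg_req() all walk the same structure rooted at port_priv->version[]: a per-version table pointing at class (and, for vendor classes, OUI) tables that are allocated on first use and freed when the last entry goes away. A sketch of that lazy table chain; the sizes and names are illustrative, not the kernel's:

	#include <stdlib.h>

	#define MAX_VERSIONS	8
	#define MAX_CLASSES	256
	#define MAX_METHODS	128

	struct method_table { void *agent[MAX_METHODS]; };
	struct class_table  { struct method_table *method[MAX_CLASSES]; };

	static struct class_table *version[MAX_VERSIONS];

	/* Allocate intermediate tables lazily on the first registration. */
	static struct method_table *get_method_table(int ver, int mgmt_class)
	{
		if (!version[ver]) {
			version[ver] = calloc(1, sizeof(*version[ver]));
			if (!version[ver])
				return NULL;
		}
		if (!version[ver]->method[mgmt_class])
			version[ver]->method[mgmt_class] =
				calloc(1, sizeof(struct method_table));
		return version[ver]->method[mgmt_class];
	}
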
1530 find_mad_agent(struct ib_mad_port_private *port_priv, in find_mad_agent() argument
1557 spin_lock_irqsave(&port_priv->reg_lock, flags); in find_mad_agent()
1565 class = port_priv->version[ in find_mad_agent()
1578 vendor = port_priv->version[ in find_mad_agent()
1600 spin_unlock_irqrestore(&port_priv->reg_lock, flags); in find_mad_agent()
1604 dev_notice(&port_priv->device->dev, in find_mad_agent()
1606 &mad_agent->agent, port_priv->port_num); in find_mad_agent()
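
find_mad_agent() resolves the receiving agent from those tables while holding reg_lock (irqsave, since it runs in completion context) and must pin the agent before unlocking, because unregistration can race with the lookup. A userspace sketch of that take-a-reference-under-the-lock discipline:

	#include <pthread.h>
	#include <stdatomic.h>

	struct agent { atomic_int refcount; };

	static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct agent *reg_table[256];

	static struct agent *find_agent(unsigned int mgmt_class)
	{
		struct agent *a;

		pthread_mutex_lock(&reg_lock);
		a = reg_table[mgmt_class];
		if (a)
			atomic_fetch_add(&a->refcount, 1); /* pin before unlock */
		pthread_mutex_unlock(&reg_lock);
		return a;	/* caller drops the reference when done */
	}
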
1856 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, in handle_ib_smi() argument
1869 rdma_cap_ib_switch(port_priv->device), in handle_ib_smi()
1871 port_priv->device->phys_port_cnt) == in handle_ib_smi()
1881 rdma_cap_ib_switch(port_priv->device), in handle_ib_smi()
1885 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) in handle_ib_smi()
1887 } else if (rdma_cap_ib_switch(port_priv->device)) { in handle_ib_smi()
1896 port_priv->device, in handle_ib_smi()
1943 handle_opa_smi(struct ib_mad_port_private *port_priv, in handle_opa_smi() argument
1956 rdma_cap_ib_switch(port_priv->device), in handle_opa_smi()
1958 port_priv->device->phys_port_cnt) == in handle_opa_smi()
1968 rdma_cap_ib_switch(port_priv->device), in handle_opa_smi()
1972 if (opa_smi_check_local_smp(smp, port_priv->device) == in handle_opa_smi()
1976 } else if (rdma_cap_ib_switch(port_priv->device)) { in handle_opa_smi()
1986 port_priv->device, in handle_opa_smi()
1999 handle_smi(struct ib_mad_port_private *port_priv, in handle_smi() argument
2011 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, in handle_smi()
2014 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); in handle_smi()
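
handle_smi() itself is only a capability switch: rdma_cap_opa_mad() picks between the OPA and plain IB variants, which mirror each other hop-check for hop-check. Reduced to its skeleton, with hypothetical helpers:

	#include <stdbool.h>

	enum smi_action { SMI_DISCARD, SMI_HANDLE };

	static enum smi_action handle_ib(void *pkt)  { (void)pkt; return SMI_HANDLE; }
	static enum smi_action handle_opa(void *pkt) { (void)pkt; return SMI_HANDLE; }

	static enum smi_action handle(void *pkt, bool port_is_opa)
	{
		/* mirrors: opa ? handle_opa_smi(...) : handle_ib_smi(...) */
		return port_is_opa ? handle_opa(pkt) : handle_ib(pkt);
	}
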
2019 struct ib_mad_port_private *port_priv = cq->cq_context; in ib_mad_recv_done() local
2032 if (list_empty_careful(&port_priv->port_list)) in ib_mad_recv_done()
2046 opa = rdma_cap_opa_mad(qp_info->port_priv->device, in ib_mad_recv_done()
2047 qp_info->port_priv->port_num); in ib_mad_recv_done()
2052 ib_dma_unmap_single(port_priv->device, in ib_mad_recv_done()
2084 if (rdma_cap_ib_switch(port_priv->device)) in ib_mad_recv_done()
2087 port_num = port_priv->port_num; in ib_mad_recv_done()
2091 if (handle_smi(port_priv, qp_info, wc, port_num, recv, in ib_mad_recv_done()
2098 if (port_priv->device->ops.process_mad) { in ib_mad_recv_done()
2099 ret = port_priv->device->ops.process_mad( in ib_mad_recv_done()
2100 port_priv->device, 0, port_priv->port_num, wc, in ib_mad_recv_done()
2114 port_priv->device, in ib_mad_recv_done()
2123 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad); in ib_mad_recv_done()
2135 port_priv->device, port_num, in ib_mad_recv_done()
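
ib_mad_recv_done() runs every completion through a fixed pipeline: unmap the buffer, give the SMI hop logic and the driver's process_mad() hook a chance to consume or answer the MAD, and only then route it to a registered agent. A compilable outline with hypothetical helpers standing in for each stage:

	#include <stdbool.h>
	#include <stddef.h>

	struct mad;

	static void dma_unmap(struct mad *m)        { (void)m; }
	static bool smi_consumed(struct mad *m)     { (void)m; return false; }
	static bool driver_consumed(struct mad *m)  { (void)m; return false; }
	static void *find_agent(struct mad *m)      { (void)m; return NULL; }
	static void deliver(void *a, struct mad *m) { (void)a; (void)m; }
	static void free_mad(struct mad *m)         { (void)m; }

	static void recv_done(struct mad *m)
	{
		void *agent;

		dma_unmap(m);			/* CPU owns the buffer again */
		if (smi_consumed(m))		/* directed-route hop handling */
			goto drop;
		if (driver_consumed(m))		/* device ops.process_mad() hook */
			goto drop;
		agent = find_agent(m);		/* reg_lock-protected lookup */
		if (agent) {
			deliver(agent, m);	/* agent now owns the buffer */
			return;
		}
	drop:
		free_mad(m);			/* nobody claimed it */
	}
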
2166 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in adjust_timeout()
2202 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in wait_for_response()
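
adjust_timeout() and wait_for_response() both re-arm a single delayed work item on the port workqueue whenever the earliest pending deadline moves; mod_delayed_work() replaces an already-queued expiry rather than queuing a second one. A kernel-context sketch of that re-arm, with the timeout-list bookkeeping elided:

	#include <linux/jiffies.h>
	#include <linux/workqueue.h>

	/* Re-arm the shared timeout worker for the new earliest deadline. */
	static void rearm_timeout(struct workqueue_struct *wq,
				  struct delayed_work *timed_work,
				  unsigned long earliest_deadline)
	{
		unsigned long delay = 0;

		if (time_after(earliest_deadline, jiffies))
			delay = earliest_deadline - jiffies;

		/* Replaces a pending expiry instead of queuing a second one. */
		mod_delayed_work(wq, timed_work, delay);
	}
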
2268 struct ib_mad_port_private *port_priv = cq->cq_context; in ib_mad_send_done() local
2278 if (list_empty_careful(&port_priv->port_list)) in ib_mad_send_done()
2282 if (!ib_mad_send_error(port_priv, wc)) in ib_mad_send_done()
2326 dev_err(&port_priv->device->dev, in ib_mad_send_done()
2351 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv, in ib_mad_send_error() argument
2388 dev_err(&port_priv->device->dev, in ib_mad_send_error()
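
ib_mad_send_done() defers failed completions to ib_mad_send_error(), which can absorb them entirely (for instance by reposting a flushed work request). The listing only shows the call shape, not the helper's full logic; the sketch below assumes, matching the !ib_mad_send_error() early return above, that false means the completion was absorbed:

	#include <stdbool.h>
	#include <stdio.h>

	enum wc_status { WC_SUCCESS, WC_FLUSH_ERR, WC_OTHER_ERR };

	struct wc { enum wc_status status; };

	static bool repost(struct wc *wc) { (void)wc; return true; }

	/* Returns false when the error was absorbed (e.g. a successful repost). */
	static bool send_error(struct wc *wc)
	{
		if (wc->status == WC_FLUSH_ERR && repost(wc))
			return false;	/* retried; wait for the new completion */
		fprintf(stderr, "send failed: %d\n", wc->status);
		return true;		/* let the caller finish the failed send */
	}

	static void send_done(struct wc *wc)
	{
		if (wc->status != WC_SUCCESS && !send_error(wc))
			return;
		/* ...normal completion processing for the send... */
	}
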
2507 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, in local_completions()
2508 mad_agent_priv->qp_info->port_priv->port_num); in local_completions()
2641 port_priv->wq, in timeout_sends()
2681 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; in ib_mad_post_receive_mads()
2694 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), in ib_mad_post_receive_mads()
2702 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2706 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2728 ib_dma_unmap_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2733 dev_err(&qp_info->port_priv->device->dev, in ib_mad_post_receive_mads()
2767 ib_dma_unmap_single(qp_info->port_priv->device, in cleanup_recv_queue()
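
ib_mad_post_receive_mads() loops allocating a buffer, DMA-mapping it, and posting the receive; its error paths undo exactly the steps already taken, in reverse. One iteration of that cycle as a compilable sketch; alloc_buf(), dma_map() and friends are hypothetical:

	#include <errno.h>
	#include <stdint.h>
	#include <stdlib.h>

	static void *alloc_buf(void)                { return malloc(256); }
	static uintptr_t dma_map(void *b)           { return (uintptr_t)b; }
	static int dma_mapping_failed(uintptr_t a)  { return a == 0; }
	static int post_recv(uintptr_t a)           { (void)a; return 0; }
	static void dma_unmap(uintptr_t a)          { (void)a; }

	static int post_one_recv(void)
	{
		void *buf;
		uintptr_t addr;
		int ret;

		buf = alloc_buf();
		if (!buf)
			return -ENOMEM;

		addr = dma_map(buf);
		if (dma_mapping_failed(addr)) {
			ret = -ENOMEM;
			goto free_buf;
		}

		ret = post_recv(addr);
		if (ret)
			goto unmap;	/* undo in reverse order */
		return 0;

	unmap:
		dma_unmap(addr);
	free_buf:
		free(buf);
		return ret;
	}
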
2780 static int ib_mad_port_start(struct ib_mad_port_private *port_priv) in ib_mad_port_start() argument
2791 ret = ib_find_pkey(port_priv->device, port_priv->port_num, in ib_mad_port_start()
2797 qp = port_priv->qp_info[i].qp; in ib_mad_port_start()
2811 dev_err(&port_priv->device->dev, in ib_mad_port_start()
2820 dev_err(&port_priv->device->dev, in ib_mad_port_start()
2830 dev_err(&port_priv->device->dev, in ib_mad_port_start()
2837 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); in ib_mad_port_start()
2839 dev_err(&port_priv->device->dev, in ib_mad_port_start()
2846 if (!port_priv->qp_info[i].qp) in ib_mad_port_start()
2849 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); in ib_mad_port_start()
2851 dev_err(&port_priv->device->dev, in ib_mad_port_start()
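
ib_mad_port_start() brings each special QP to a usable state with the standard INIT -> RTR -> RTS ladder; the pkey index found via ib_find_pkey() and the qkey (0 for QP0, IB_QP1_QKEY for QP1) go in at the INIT step. A condensed kernel-context sketch of that ladder, with the error prints elided:

	#include <rdma/ib_verbs.h>

	static int start_mad_qp(struct ib_qp *qp, u16 pkey_index, u32 qkey)
	{
		struct ib_qp_attr attr = {};
		int ret;

		attr.qp_state = IB_QPS_INIT;
		attr.pkey_index = pkey_index;
		attr.qkey = qkey;	/* 0 for QP0, IB_QP1_QKEY for QP1 */
		ret = ib_modify_qp(qp, &attr,
				   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret)
			return ret;

		attr.qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
		if (ret)
			return ret;

		attr.qp_state = IB_QPS_RTS;
		attr.sq_psn = 0;	/* mad.c uses IB_MAD_SEND_Q_PSN */
		return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
	}
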
2866 dev_err(&qp_info->port_priv->device->dev, in qp_event_handler()
2880 static void init_mad_qp(struct ib_mad_port_private *port_priv, in init_mad_qp() argument
2883 qp_info->port_priv = port_priv; in init_mad_qp()
2896 qp_init_attr.send_cq = qp_info->port_priv->cq; in create_mad_qp()
2897 qp_init_attr.recv_cq = qp_info->port_priv->cq; in create_mad_qp()
2904 qp_init_attr.port_num = qp_info->port_priv->port_num; in create_mad_qp()
2907 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); in create_mad_qp()
2909 dev_err(&qp_info->port_priv->device->dev, in create_mad_qp()
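
create_mad_qp() builds both special QPs on the port's single CQ, which is why send_cq and recv_cq point at the same object above. Roughly what that ib_create_qp() call looks like; the queue depths here are placeholders, not the kernel's tunable sizes:

	#include <rdma/ib_verbs.h>

	static struct ib_qp *make_mad_qp(struct ib_pd *pd, struct ib_cq *cq,
					 enum ib_qp_type type, u32 port_num)
	{
		struct ib_qp_init_attr init_attr = {};

		init_attr.send_cq = cq;		/* one CQ serves both directions */
		init_attr.recv_cq = cq;
		init_attr.qp_type = type;	/* IB_QPT_SMI or IB_QPT_GSI */
		init_attr.port_num = port_num;
		init_attr.cap.max_send_wr = 128;	/* placeholder depths */
		init_attr.cap.max_recv_wr = 512;
		init_attr.cap.max_send_sge = 1;
		init_attr.cap.max_recv_sge = 1;
		init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;

		return ib_create_qp(pd, &init_attr);	/* ERR_PTR on failure */
	}
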
2940 struct ib_mad_port_private *port_priv; in ib_mad_port_open() local
2952 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); in ib_mad_port_open()
2953 if (!port_priv) in ib_mad_port_open()
2956 port_priv->device = device; in ib_mad_port_open()
2957 port_priv->port_num = port_num; in ib_mad_port_open()
2958 spin_lock_init(&port_priv->reg_lock); in ib_mad_port_open()
2959 init_mad_qp(port_priv, &port_priv->qp_info[0]); in ib_mad_port_open()
2960 init_mad_qp(port_priv, &port_priv->qp_info[1]); in ib_mad_port_open()
2967 port_priv->pd = ib_alloc_pd(device, 0); in ib_mad_port_open()
2968 if (IS_ERR(port_priv->pd)) { in ib_mad_port_open()
2970 ret = PTR_ERR(port_priv->pd); in ib_mad_port_open()
2974 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, in ib_mad_port_open()
2976 if (IS_ERR(port_priv->cq)) { in ib_mad_port_open()
2978 ret = PTR_ERR(port_priv->cq); in ib_mad_port_open()
2983 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); in ib_mad_port_open()
2989 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); in ib_mad_port_open()
2994 port_priv->wq = alloc_ordered_workqueue("ib_mad%u", WQ_MEM_RECLAIM, in ib_mad_port_open()
2996 if (!port_priv->wq) { in ib_mad_port_open()
3002 list_add_tail(&port_priv->port_list, &ib_mad_port_list); in ib_mad_port_open()
3005 ret = ib_mad_port_start(port_priv); in ib_mad_port_open()
3015 list_del_init(&port_priv->port_list); in ib_mad_port_open()
3018 destroy_workqueue(port_priv->wq); in ib_mad_port_open()
3020 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_open()
3022 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_open()
3024 ib_free_cq(port_priv->cq); in ib_mad_port_open()
3025 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_open()
3026 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_open()
3028 ib_dealloc_pd(port_priv->pd); in ib_mad_port_open()
3030 kfree(port_priv); in ib_mad_port_open()
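
The tail of ib_mad_port_open() is a textbook goto unwind ladder: every acquisition gets a label, and a failure at step N jumps to the label that releases steps N-1 down to 1 in reverse order, so nothing leaks on any partial-setup path. The shape, reduced to a runnable toy where acquire()/release() stand in for the PD/CQ/QP/workqueue calls:

	#include <stdlib.h>

	struct port { void *pd, *cq, *qp0, *qp1, *wq; };

	static void *acquire(void)   { return malloc(1); }
	static void release(void *r) { free(r); }

	static struct port *port_open(void)
	{
		struct port *p = calloc(1, sizeof(*p));

		if (!p)
			return NULL;
		if (!(p->pd = acquire()))
			goto err_free;
		if (!(p->cq = acquire()))
			goto err_pd;
		if (!(p->qp0 = acquire()))
			goto err_cq;
		if (!(p->qp1 = acquire()))
			goto err_qp0;
		if (!(p->wq = acquire()))
			goto err_qp1;
		return p;		/* fully constructed */

	err_qp1:
		release(p->qp1);
	err_qp0:
		release(p->qp0);
	err_cq:
		release(p->cq);
	err_pd:
		release(p->pd);
	err_free:
		free(p);
		return NULL;
	}

ib_mad_port_close(), listed next, then performs the same releases unconditionally and in the same reverse order.
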
3042 struct ib_mad_port_private *port_priv; in ib_mad_port_close() local
3046 port_priv = __ib_get_mad_port(device, port_num); in ib_mad_port_close()
3047 if (port_priv == NULL) { in ib_mad_port_close()
3052 list_del_init(&port_priv->port_list); in ib_mad_port_close()
3055 destroy_workqueue(port_priv->wq); in ib_mad_port_close()
3056 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_close()
3057 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_close()
3058 ib_free_cq(port_priv->cq); in ib_mad_port_close()
3059 ib_dealloc_pd(port_priv->pd); in ib_mad_port_close()
3060 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_close()
3061 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_close()
3064 kfree(port_priv); in ib_mad_port_close()