1  /*
2   * Broadcom NetXtreme-E RoCE driver.
3   *
4   * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5   * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6   *
7   * This software is available to you under a choice of one of two
8   * licenses.  You may choose to be licensed under the terms of the GNU
9   * General Public License (GPL) Version 2, available from the file
10   * COPYING in the main directory of this source tree, or the
11   * BSD license below:
12   *
13   * Redistribution and use in source and binary forms, with or without
14   * modification, are permitted provided that the following conditions
15   * are met:
16   *
17   * 1. Redistributions of source code must retain the above copyright
18   *    notice, this list of conditions and the following disclaimer.
19   * 2. Redistributions in binary form must reproduce the above copyright
20   *    notice, this list of conditions and the following disclaimer in
21   *    the documentation and/or other materials provided with the
22   *    distribution.
23   *
24   * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25   * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26   * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27   * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28   * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29   * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30   * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31   * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32   * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33   * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34   * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35   *
36   * Description: IB Verbs interpreter
37   */
38  
39  #include <linux/interrupt.h>
40  #include <linux/types.h>
41  #include <linux/pci.h>
42  #include <linux/netdevice.h>
43  #include <linux/if_ether.h>
44  #include <net/addrconf.h>
45  
46  #include <rdma/ib_verbs.h>
47  #include <rdma/ib_user_verbs.h>
48  #include <rdma/ib_umem.h>
49  #include <rdma/ib_addr.h>
50  #include <rdma/ib_mad.h>
51  #include <rdma/ib_cache.h>
52  #include <rdma/uverbs_ioctl.h>
53  #include <linux/hashtable.h>
54  
55  #include "bnxt_ulp.h"
56  
57  #include "roce_hsi.h"
58  #include "qplib_res.h"
59  #include "qplib_sp.h"
60  #include "qplib_fp.h"
61  #include "qplib_rcfw.h"
62  
63  #include "bnxt_re.h"
64  #include "ib_verbs.h"
65  
66  #include <rdma/uverbs_types.h>
67  #include <rdma/uverbs_std_types.h>
68  
69  #include <rdma/ib_user_ioctl_cmds.h>
70  
71  #define UVERBS_MODULE_NAME bnxt_re
72  #include <rdma/uverbs_named_ioctl.h>
73  
74  #include <rdma/bnxt_re-abi.h>
75  
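/* Helpers to translate memory access flags between the IB core
 * representation and the bnxt_qplib representation, e.g.
 * IB_ACCESS_REMOTE_READ <-> BNXT_QPLIB_ACCESS_REMOTE_READ.
 */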
76  static int __from_ib_access_flags(int iflags)
77  {
78  	int qflags = 0;
79  
80  	if (iflags & IB_ACCESS_LOCAL_WRITE)
81  		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
82  	if (iflags & IB_ACCESS_REMOTE_READ)
83  		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
84  	if (iflags & IB_ACCESS_REMOTE_WRITE)
85  		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
86  	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
87  		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
88  	if (iflags & IB_ACCESS_MW_BIND)
89  		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
90  	if (iflags & IB_ZERO_BASED)
91  		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
92  	if (iflags & IB_ACCESS_ON_DEMAND)
93  		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
94  	return qflags;
95  };
96  
97  static enum ib_access_flags __to_ib_access_flags(int qflags)
98  {
99  	enum ib_access_flags iflags = 0;
100  
101  	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
102  		iflags |= IB_ACCESS_LOCAL_WRITE;
103  	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
104  		iflags |= IB_ACCESS_REMOTE_WRITE;
105  	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
106  		iflags |= IB_ACCESS_REMOTE_READ;
107  	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
108  		iflags |= IB_ACCESS_REMOTE_ATOMIC;
109  	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
110  		iflags |= IB_ACCESS_MW_BIND;
111  	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
112  		iflags |= IB_ZERO_BASED;
113  	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
114  		iflags |= IB_ACCESS_ON_DEMAND;
115  	return iflags;
116  };
117  
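/* Enable relaxed ordering on an MR only when the device capability
 * advertises it and PCIe relaxed ordering is enabled for the function.
 */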
118  static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev,
119  						   struct bnxt_qplib_mrw *qplib_mr)
120  {
121  	if (_is_relaxed_ordering_supported(rdev->dev_attr.dev_cap_flags2) &&
122  	    pcie_relaxed_ordering_enabled(rdev->en_dev->pdev))
123  		qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO;
124  }
125  
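/* Copy an IB SGE list into the qplib SGE layout and return the total
 * length in bytes of the list.
 */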
126  static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
127  			     struct bnxt_qplib_sge *sg_list, int num)
128  {
129  	int i, total = 0;
130  
131  	for (i = 0; i < num; i++) {
132  		sg_list[i].addr = ib_sg_list[i].addr;
133  		sg_list[i].lkey = ib_sg_list[i].lkey;
134  		sg_list[i].size = ib_sg_list[i].length;
135  		total += sg_list[i].size;
136  	}
137  	return total;
138  }
139  
140  /* Device */
141  int bnxt_re_query_device(struct ib_device *ibdev,
142  			 struct ib_device_attr *ib_attr,
143  			 struct ib_udata *udata)
144  {
145  	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
146  	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
147  
148  	memset(ib_attr, 0, sizeof(*ib_attr));
149  	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
150  	       min(sizeof(dev_attr->fw_ver),
151  		   sizeof(ib_attr->fw_ver)));
152  	addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid,
153  			    rdev->netdev->dev_addr);
154  	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
155  	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED;
156  
157  	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
158  	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
159  	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
160  	ib_attr->max_qp = dev_attr->max_qp;
161  	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
162  	ib_attr->device_cap_flags =
163  				    IB_DEVICE_CURR_QP_STATE_MOD
164  				    | IB_DEVICE_RC_RNR_NAK_GEN
165  				    | IB_DEVICE_SHUTDOWN_PORT
166  				    | IB_DEVICE_SYS_IMAGE_GUID
167  				    | IB_DEVICE_RESIZE_MAX_WR
168  				    | IB_DEVICE_PORT_ACTIVE_EVENT
169  				    | IB_DEVICE_N_NOTIFY_CQ
170  				    | IB_DEVICE_MEM_WINDOW
171  				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
172  				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
173  	ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
174  	ib_attr->max_send_sge = dev_attr->max_qp_sges;
175  	ib_attr->max_recv_sge = dev_attr->max_qp_sges;
176  	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
177  	ib_attr->max_cq = dev_attr->max_cq;
178  	ib_attr->max_cqe = dev_attr->max_cq_wqes;
179  	ib_attr->max_mr = dev_attr->max_mr;
180  	ib_attr->max_pd = dev_attr->max_pd;
181  	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
182  	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
183  	ib_attr->atomic_cap = IB_ATOMIC_NONE;
184  	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
185  	if (dev_attr->is_atomic) {
186  		ib_attr->atomic_cap = IB_ATOMIC_GLOB;
187  		ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
188  	}
189  
190  	ib_attr->max_ee_rd_atom = 0;
191  	ib_attr->max_res_rd_atom = 0;
192  	ib_attr->max_ee_init_rd_atom = 0;
193  	ib_attr->max_ee = 0;
194  	ib_attr->max_rdd = 0;
195  	ib_attr->max_mw = dev_attr->max_mw;
196  	ib_attr->max_raw_ipv6_qp = 0;
197  	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
198  	ib_attr->max_mcast_grp = 0;
199  	ib_attr->max_mcast_qp_attach = 0;
200  	ib_attr->max_total_mcast_qp_attach = 0;
201  	ib_attr->max_ah = dev_attr->max_ah;
202  
203  	ib_attr->max_srq = dev_attr->max_srq;
204  	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
205  	ib_attr->max_srq_sge = dev_attr->max_srq_sges;
206  
207  	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
208  
209  	ib_attr->max_pkeys = 1;
210  	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
211  	return 0;
212  }
213  
214  /* Port */
215  int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
216  		       struct ib_port_attr *port_attr)
217  {
218  	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
219  	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
220  	int rc;
221  
222  	memset(port_attr, 0, sizeof(*port_attr));
223  
224  	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
225  		port_attr->state = IB_PORT_ACTIVE;
226  		port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
227  	} else {
228  		port_attr->state = IB_PORT_DOWN;
229  		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
230  	}
231  	port_attr->max_mtu = IB_MTU_4096;
232  	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
233  	port_attr->gid_tbl_len = dev_attr->max_sgid;
234  	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
235  				    IB_PORT_DEVICE_MGMT_SUP |
236  				    IB_PORT_VENDOR_CLASS_SUP;
237  	port_attr->ip_gids = true;
238  
239  	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
240  	port_attr->bad_pkey_cntr = 0;
241  	port_attr->qkey_viol_cntr = 0;
242  	port_attr->pkey_tbl_len = dev_attr->max_pkey;
243  	port_attr->lid = 0;
244  	port_attr->sm_lid = 0;
245  	port_attr->lmc = 0;
246  	port_attr->max_vl_num = 4;
247  	port_attr->sm_sl = 0;
248  	port_attr->subnet_timeout = 0;
249  	port_attr->init_type_reply = 0;
250  	rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed,
251  			      &port_attr->active_width);
252  
253  	return rc;
254  }
255  
256  int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
257  			       struct ib_port_immutable *immutable)
258  {
259  	struct ib_port_attr port_attr;
260  
261  	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
262  		return -EINVAL;
263  
264  	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
265  	immutable->gid_tbl_len = port_attr.gid_tbl_len;
266  	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
267  	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
268  	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
269  	return 0;
270  }
271  
272  void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
273  {
274  	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
275  
276  	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
277  		 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
278  		 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
279  }
280  
281  int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
282  		       u16 index, u16 *pkey)
283  {
284  	if (index > 0)
285  		return -EINVAL;
286  
287  	*pkey = IB_DEFAULT_PKEY_FULL;
288  
289  	return 0;
290  }
291  
292  int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
293  		      int index, union ib_gid *gid)
294  {
295  	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
296  	int rc;
297  
298  	/* Ignore port_num */
299  	memset(gid, 0, sizeof(*gid));
300  	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
301  				 &rdev->qplib_res.sgid_tbl, index,
302  				 (struct bnxt_qplib_gid *)gid);
303  	return rc;
304  }
305  
306  int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
307  {
308  	int rc = 0;
309  	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
310  	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
311  	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
312  	struct bnxt_qplib_gid *gid_to_del;
313  	u16 vlan_id = 0xFFFF;
314  
315  	/* Delete the entry from the hardware */
316  	ctx = *context;
317  	if (!ctx)
318  		return -EINVAL;
319  
320  	if (sgid_tbl && sgid_tbl->active) {
321  		if (ctx->idx >= sgid_tbl->max)
322  			return -EINVAL;
323  		gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
324  		vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
325  		/* DEL_GID is called in WQ context (netdevice_event_work_handler)
326  		 * or via the ib_unregister_device path. In the former case, QP1
327  		 * may not have been destroyed yet; in that case just return, as FW
328  		 * needs that entry to be present and will fail its deletion.
329  		 * We could get invoked again after QP1 is destroyed OR get an
330  		 * ADD_GID call with a different GID value for the same index,
331  		 * where we issue a MODIFY_GID cmd to update the GID entry -- TBD
332  		 */
333  		if (ctx->idx == 0 &&
334  		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
335  		    ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
336  			ibdev_dbg(&rdev->ibdev,
337  				  "Trying to delete GID0 while QP1 is alive\n");
338  			return -EFAULT;
339  		}
340  		ctx->refcnt--;
341  		if (!ctx->refcnt) {
342  			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
343  						 vlan_id,  true);
344  			if (rc) {
345  				ibdev_err(&rdev->ibdev,
346  					  "Failed to remove GID: %#x", rc);
347  			} else {
348  				ctx_tbl = sgid_tbl->ctx;
349  				ctx_tbl[ctx->idx] = NULL;
350  				kfree(ctx);
351  			}
352  		}
353  	} else {
354  		return -EINVAL;
355  	}
356  	return rc;
357  }
358  
359  int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
360  {
361  	int rc;
362  	u32 tbl_idx = 0;
363  	u16 vlan_id = 0xFFFF;
364  	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
365  	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
366  	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
367  
368  	rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
369  	if (rc)
370  		return rc;
371  
372  	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
373  				 rdev->qplib_res.netdev->dev_addr,
374  				 vlan_id, true, &tbl_idx);
375  	if (rc == -EALREADY) {
376  		ctx_tbl = sgid_tbl->ctx;
377  		ctx_tbl[tbl_idx]->refcnt++;
378  		*context = ctx_tbl[tbl_idx];
379  		return 0;
380  	}
381  
382  	if (rc < 0) {
383  		ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
384  		return rc;
385  	}
386  
387  	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
388  	if (!ctx)
389  		return -ENOMEM;
390  	ctx_tbl = sgid_tbl->ctx;
391  	ctx->idx = tbl_idx;
392  	ctx->refcnt = 1;
393  	ctx_tbl[tbl_idx] = ctx;
394  	*context = ctx;
395  
396  	return rc;
397  }
398  
399  enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
400  					    u32 port_num)
401  {
402  	return IB_LINK_LAYER_ETHERNET;
403  }
404  
405  #define	BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
406  
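/* Pre-build the type-1 memory-window bind WQE used for fencing on
 * legacy (pre-P5) chips; only wqe->bind.r_key is refreshed at each
 * (re)bind.
 */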
407  static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
408  {
409  	struct bnxt_re_fence_data *fence = &pd->fence;
410  	struct ib_mr *ib_mr = &fence->mr->ib_mr;
411  	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
412  	struct bnxt_re_dev *rdev = pd->rdev;
413  
414  	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
415  		return;
416  
417  	memset(wqe, 0, sizeof(*wqe));
418  	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
419  	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
420  	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
421  	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
422  	wqe->bind.zero_based = false;
423  	wqe->bind.parent_l_key = ib_mr->lkey;
424  	wqe->bind.va = (u64)(unsigned long)fence->va;
425  	wqe->bind.length = fence->size;
426  	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
427  	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
428  
429  	/* Save the initial rkey in fence structure for now;
430  	 * wqe->bind.r_key will be set at (re)bind time.
431  	 */
432  	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
433  }
434  
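/* Post the pre-built fence bind WQE on this QP's SQ and ring the
 * doorbell; a fresh rkey is derived for the next bind.
 */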
435  static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
436  {
437  	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
438  					     qplib_qp);
439  	struct ib_pd *ib_pd = qp->ib_qp.pd;
440  	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
441  	struct bnxt_re_fence_data *fence = &pd->fence;
442  	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
443  	struct bnxt_qplib_swqe wqe;
444  	int rc;
445  
446  	memcpy(&wqe, fence_wqe, sizeof(wqe));
447  	wqe.bind.r_key = fence->bind_rkey;
448  	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
449  
450  	ibdev_dbg(&qp->rdev->ibdev,
451  		  "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
452  		wqe.bind.r_key, qp->qplib_qp.id, pd);
453  	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
454  	if (rc) {
455  		ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
456  		return rc;
457  	}
458  	bnxt_qplib_post_send_db(&qp->qplib_qp);
459  
460  	return rc;
461  }
462  
463  static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
464  {
465  	struct bnxt_re_fence_data *fence = &pd->fence;
466  	struct bnxt_re_dev *rdev = pd->rdev;
467  	struct device *dev = &rdev->en_dev->pdev->dev;
468  	struct bnxt_re_mr *mr = fence->mr;
469  
470  	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
471  		return;
472  
473  	if (fence->mw) {
474  		bnxt_re_dealloc_mw(fence->mw);
475  		fence->mw = NULL;
476  	}
477  	if (mr) {
478  		if (mr->ib_mr.rkey)
479  			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
480  					     true);
481  		if (mr->ib_mr.lkey)
482  			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
483  		kfree(mr);
484  		fence->mr = NULL;
485  	}
486  	if (fence->dma_addr) {
487  		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
488  				 DMA_BIDIRECTIONAL);
489  		fence->dma_addr = 0;
490  	}
491  }
492  
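/* Set up per-PD fence resources on legacy (pre-P5) chips: DMA-map the
 * fence buffer, register an MR over it, allocate a type-1 MW and
 * pre-build the bind WQE.
 */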
493  static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
494  {
495  	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
496  	struct bnxt_re_fence_data *fence = &pd->fence;
497  	struct bnxt_re_dev *rdev = pd->rdev;
498  	struct device *dev = &rdev->en_dev->pdev->dev;
499  	struct bnxt_re_mr *mr = NULL;
500  	dma_addr_t dma_addr = 0;
501  	struct ib_mw *mw;
502  	int rc;
503  
504  	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
505  		return 0;
506  
507  	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
508  				  DMA_BIDIRECTIONAL);
509  	rc = dma_mapping_error(dev, dma_addr);
510  	if (rc) {
511  		ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
512  		rc = -EIO;
513  		fence->dma_addr = 0;
514  		goto fail;
515  	}
516  	fence->dma_addr = dma_addr;
517  
518  	/* Allocate a MR */
519  	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
520  	if (!mr) {
521  		rc = -ENOMEM;
522  		goto fail;
523  	}
524  	fence->mr = mr;
525  	mr->rdev = rdev;
526  	mr->qplib_mr.pd = &pd->qplib_pd;
527  	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
528  	mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
529  	if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
530  		rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
531  		if (rc) {
532  			ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
533  			goto fail;
534  		}
535  
536  		/* Register MR */
537  		mr->ib_mr.lkey = mr->qplib_mr.lkey;
538  	} else {
539  		mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
540  	}
541  	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
542  	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
543  	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
544  			       BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
545  	if (rc) {
546  		ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
547  		goto fail;
548  	}
549  	mr->ib_mr.rkey = mr->qplib_mr.rkey;
550  
551  	/* Create a fence MW only for kernel consumers */
552  	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
553  	if (IS_ERR(mw)) {
554  		ibdev_err(&rdev->ibdev,
555  			  "Failed to create fence-MW for PD: %p\n", pd);
556  		rc = PTR_ERR(mw);
557  		goto fail;
558  	}
559  	fence->mw = mw;
560  
561  	bnxt_re_create_fence_wqe(pd);
562  	return 0;
563  
564  fail:
565  	bnxt_re_destroy_fence_mr(pd);
566  	return rc;
567  }
568  
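/* Allocate and insert an rdma_user_mmap entry for the requested mapping
 * type. The shared page is inserted at a fixed offset of 0; all other
 * types get a driver-chosen offset. Returns NULL on failure.
 */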
569  static struct bnxt_re_user_mmap_entry*
570  bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
571  			  enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
572  {
573  	struct bnxt_re_user_mmap_entry *entry;
574  	int ret;
575  
576  	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
577  	if (!entry)
578  		return NULL;
579  
580  	entry->mem_offset = mem_offset;
581  	entry->mmap_flag = mmap_flag;
582  	entry->uctx = uctx;
583  
584  	switch (mmap_flag) {
585  	case BNXT_RE_MMAP_SH_PAGE:
586  		ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
587  							&entry->rdma_entry, PAGE_SIZE, 0);
588  		break;
589  	case BNXT_RE_MMAP_UC_DB:
590  	case BNXT_RE_MMAP_WC_DB:
591  	case BNXT_RE_MMAP_DBR_BAR:
592  	case BNXT_RE_MMAP_DBR_PAGE:
593  	case BNXT_RE_MMAP_TOGGLE_PAGE:
594  		ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
595  						  &entry->rdma_entry, PAGE_SIZE);
596  		break;
597  	default:
598  		ret = -EINVAL;
599  		break;
600  	}
601  
602  	if (ret) {
603  		kfree(entry);
604  		return NULL;
605  	}
606  	if (offset)
607  		*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
608  
609  	return entry;
610  }
611  
612  /* Protection Domains */
613  int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
614  {
615  	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
616  	struct bnxt_re_dev *rdev = pd->rdev;
617  
618  	if (udata) {
619  		rdma_user_mmap_entry_remove(pd->pd_db_mmap);
620  		pd->pd_db_mmap = NULL;
621  	}
622  
623  	bnxt_re_destroy_fence_mr(pd);
624  
625  	if (pd->qplib_pd.id) {
626  		if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
627  					   &rdev->qplib_res.pd_tbl,
628  					   &pd->qplib_pd))
629  			atomic_dec(&rdev->stats.res.pd_count);
630  	}
631  	return 0;
632  }
633  
634  int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
635  {
636  	struct ib_device *ibdev = ibpd->device;
637  	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
638  	struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
639  		udata, struct bnxt_re_ucontext, ib_uctx);
640  	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
641  	struct bnxt_re_user_mmap_entry *entry = NULL;
642  	u32 active_pds;
643  	int rc = 0;
644  
645  	pd->rdev = rdev;
646  	if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
647  		ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
648  		rc = -ENOMEM;
649  		goto fail;
650  	}
651  
652  	if (udata) {
653  		struct bnxt_re_pd_resp resp = {};
654  
655  		if (!ucntx->dpi.dbr) {
656  			/* Allocate the DPI in alloc_pd to avoid failures of
657  			 * ibv_devinfo and similar applications when DPIs
658  			 * are depleted.
659  			 */
660  			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res,
661  						 &ucntx->dpi, ucntx, BNXT_QPLIB_DPI_TYPE_UC)) {
662  				rc = -ENOMEM;
663  				goto dbfail;
664  			}
665  		}
666  
667  		resp.pdid = pd->qplib_pd.id;
668  		/* Still allow mapping this DBR to the new user PD. */
669  		resp.dpi = ucntx->dpi.dpi;
670  
671  		entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
672  						  BNXT_RE_MMAP_UC_DB, &resp.dbr);
673  
674  		if (!entry) {
675  			rc = -ENOMEM;
676  			goto dbfail;
677  		}
678  
679  		pd->pd_db_mmap = &entry->rdma_entry;
680  
681  		rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
682  		if (rc) {
683  			rdma_user_mmap_entry_remove(pd->pd_db_mmap);
684  			rc = -EFAULT;
685  			goto dbfail;
686  		}
687  	}
688  
689  	if (!udata)
690  		if (bnxt_re_create_fence_mr(pd))
691  			ibdev_warn(&rdev->ibdev,
692  				   "Failed to create Fence-MR\n");
693  	active_pds = atomic_inc_return(&rdev->stats.res.pd_count);
694  	if (active_pds > rdev->stats.res.pd_watermark)
695  		rdev->stats.res.pd_watermark = active_pds;
696  
697  	return 0;
698  dbfail:
699  	bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
700  			      &pd->qplib_pd);
701  fail:
702  	return rc;
703  }
704  
705  /* Address Handles */
706  int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
707  {
708  	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
709  	struct bnxt_re_dev *rdev = ah->rdev;
710  	bool block = true;
711  	int rc;
712  
713  	block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
714  	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
715  	if (BNXT_RE_CHECK_RC(rc)) {
716  		if (rc == -ETIMEDOUT)
717  			rc = 0;
718  		else
719  			goto fail;
720  	}
721  	atomic_dec(&rdev->stats.res.ah_count);
722  fail:
723  	return rc;
724  }
725  
726  static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
727  {
728  	u8 nw_type;
729  
730  	switch (ntype) {
731  	case RDMA_NETWORK_IPV4:
732  		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
733  		break;
734  	case RDMA_NETWORK_IPV6:
735  		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
736  		break;
737  	default:
738  		nw_type = CMDQ_CREATE_AH_TYPE_V1;
739  		break;
740  	}
741  	return nw_type;
742  }
743  
744  int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
745  		      struct ib_udata *udata)
746  {
747  	struct ib_pd *ib_pd = ib_ah->pd;
748  	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
749  	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
750  	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
751  	struct bnxt_re_dev *rdev = pd->rdev;
752  	const struct ib_gid_attr *sgid_attr;
753  	struct bnxt_re_gid_ctx *ctx;
754  	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
755  	u32 active_ahs;
756  	u8 nw_type;
757  	int rc;
758  
759  	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
760  		ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
761  		return -EINVAL;
762  	}
763  
764  	ah->rdev = rdev;
765  	ah->qplib_ah.pd = &pd->qplib_pd;
766  
767  	/* Supply the configuration for the HW */
768  	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
769  	       sizeof(union ib_gid));
770  	sgid_attr = grh->sgid_attr;
771  	/* Get the HW context of the GID. The reference to the
772  	 * GID table entry has already been taken by the caller.
773  	 */
774  	ctx = rdma_read_gid_hw_context(sgid_attr);
775  	ah->qplib_ah.sgid_index = ctx->idx;
776  	ah->qplib_ah.host_sgid_index = grh->sgid_index;
777  	ah->qplib_ah.traffic_class = grh->traffic_class;
778  	ah->qplib_ah.flow_label = grh->flow_label;
779  	ah->qplib_ah.hop_limit = grh->hop_limit;
780  	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
781  
782  	/* Get network header type for this GID */
783  	nw_type = rdma_gid_attr_network_type(sgid_attr);
784  	ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
785  
786  	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
787  	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
788  				  !(init_attr->flags &
789  				    RDMA_CREATE_AH_SLEEPABLE));
790  	if (rc) {
791  		ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
792  		return rc;
793  	}
794  
795  	/* Write AVID to shared page. */
796  	if (udata) {
797  		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
798  			udata, struct bnxt_re_ucontext, ib_uctx);
799  		unsigned long flag;
800  		u32 *wrptr;
801  
802  		spin_lock_irqsave(&uctx->sh_lock, flag);
803  		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
804  		*wrptr = ah->qplib_ah.id;
805  		wmb(); /* make sure cache is updated. */
806  		spin_unlock_irqrestore(&uctx->sh_lock, flag);
807  	}
808  	active_ahs = atomic_inc_return(&rdev->stats.res.ah_count);
809  	if (active_ahs > rdev->stats.res.ah_watermark)
810  		rdev->stats.res.ah_watermark = active_ahs;
811  
812  	return 0;
813  }
814  
815  int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
816  {
817  	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
818  
819  	ah_attr->type = ib_ah->type;
820  	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
821  	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
822  	rdma_ah_set_grh(ah_attr, NULL, 0,
823  			ah->qplib_ah.host_sgid_index,
824  			0, ah->qplib_ah.traffic_class);
825  	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
826  	rdma_ah_set_port_num(ah_attr, 1);
827  	rdma_ah_set_static_rate(ah_attr, 0);
828  	return 0;
829  }
830  
831  unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
832  	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
833  {
834  	unsigned long flags;
835  
836  	spin_lock_irqsave(&qp->scq->cq_lock, flags);
837  	if (qp->rcq != qp->scq)
838  		spin_lock(&qp->rcq->cq_lock);
839  	else
840  		__acquire(&qp->rcq->cq_lock);
841  
842  	return flags;
843  }
844  
845  void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
846  			unsigned long flags)
847  	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
848  {
849  	if (qp->rcq != qp->scq)
850  		spin_unlock(&qp->rcq->cq_lock);
851  	else
852  		__release(&qp->rcq->cq_lock);
853  	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
854  }
855  
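/* Tear down the shadow GSI resources: destroy the shadow AH and shadow
 * QP, remove the shadow QP from the active QP list and free the SQP
 * table.
 */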
856  static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
857  {
858  	struct bnxt_re_qp *gsi_sqp;
859  	struct bnxt_re_ah *gsi_sah;
860  	struct bnxt_re_dev *rdev;
861  	int rc;
862  
863  	rdev = qp->rdev;
864  	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
865  	gsi_sah = rdev->gsi_ctx.gsi_sah;
866  
867  	ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
868  	bnxt_qplib_destroy_ah(&rdev->qplib_res,
869  			      &gsi_sah->qplib_ah,
870  			      true);
871  	atomic_dec(&rdev->stats.res.ah_count);
872  	bnxt_qplib_clean_qp(&qp->qplib_qp);
873  
874  	ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
875  	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
876  	if (rc) {
877  		ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
878  		goto fail;
879  	}
880  	bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
881  
882  	/* remove from active qp list */
883  	mutex_lock(&rdev->qp_lock);
884  	list_del(&gsi_sqp->list);
885  	mutex_unlock(&rdev->qp_lock);
886  	atomic_dec(&rdev->stats.res.qp_count);
887  
888  	kfree(rdev->gsi_ctx.sqp_tbl);
889  	kfree(gsi_sah);
890  	kfree(gsi_sqp);
891  	rdev->gsi_ctx.gsi_sqp = NULL;
892  	rdev->gsi_ctx.gsi_sah = NULL;
893  	rdev->gsi_ctx.sqp_tbl = NULL;
894  
895  	return 0;
896  fail:
897  	return rc;
898  }
899  
900  /* Queue Pairs */
901  int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
902  {
903  	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
904  	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
905  	struct bnxt_re_dev *rdev = qp->rdev;
906  	struct bnxt_qplib_nq *scq_nq = NULL;
907  	struct bnxt_qplib_nq *rcq_nq = NULL;
908  	unsigned int flags;
909  	int rc;
910  
911  	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
912  
913  	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
914  	if (rc) {
915  		ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
916  		return rc;
917  	}
918  
919  	if (rdma_is_kernel_res(&qp->ib_qp.res)) {
920  		flags = bnxt_re_lock_cqs(qp);
921  		bnxt_qplib_clean_qp(&qp->qplib_qp);
922  		bnxt_re_unlock_cqs(qp, flags);
923  	}
924  
925  	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
926  
927  	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
928  		rc = bnxt_re_destroy_gsi_sqp(qp);
929  		if (rc)
930  			return rc;
931  	}
932  
933  	mutex_lock(&rdev->qp_lock);
934  	list_del(&qp->list);
935  	mutex_unlock(&rdev->qp_lock);
936  	atomic_dec(&rdev->stats.res.qp_count);
937  	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC)
938  		atomic_dec(&rdev->stats.res.rc_qp_count);
939  	else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
940  		atomic_dec(&rdev->stats.res.ud_qp_count);
941  
942  	ib_umem_release(qp->rumem);
943  	ib_umem_release(qp->sumem);
944  
945  	/* Flush all entries of the notification queue(s) associated with
946  	 * the given QP.
947  	 */
948  	scq_nq = qplib_qp->scq->nq;
949  	rcq_nq = qplib_qp->rcq->nq;
950  	bnxt_re_synchronize_nq(scq_nq);
951  	if (scq_nq != rcq_nq)
952  		bnxt_re_synchronize_nq(rcq_nq);
953  
954  	return 0;
955  }
956  
957  static u8 __from_ib_qp_type(enum ib_qp_type type)
958  {
959  	switch (type) {
960  	case IB_QPT_GSI:
961  		return CMDQ_CREATE_QP1_TYPE_GSI;
962  	case IB_QPT_RC:
963  		return CMDQ_CREATE_QP_TYPE_RC;
964  	case IB_QPT_UD:
965  		return CMDQ_CREATE_QP_TYPE_UD;
966  	default:
967  		return IB_QPT_MAX;
968  	}
969  }
970  
971  static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
972  				   int rsge, int max)
973  {
974  	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
975  		rsge = max;
976  	return bnxt_re_get_rwqe_size(rsge);
977  }
978  
979  static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
980  {
981  	u16 wqe_size, calc_ils;
982  
983  	wqe_size = bnxt_re_get_swqe_size(nsge);
984  	if (ilsize) {
985  		calc_ils = sizeof(struct sq_send_hdr) + ilsize;
986  		wqe_size = max_t(u16, calc_ils, wqe_size);
987  		wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
988  	}
989  	return wqe_size;
990  }
991  
992  static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
993  				   struct ib_qp_init_attr *init_attr)
994  {
995  	struct bnxt_qplib_dev_attr *dev_attr;
996  	struct bnxt_qplib_qp *qplqp;
997  	struct bnxt_re_dev *rdev;
998  	struct bnxt_qplib_q *sq;
999  	int align, ilsize;
1000  
1001  	rdev = qp->rdev;
1002  	qplqp = &qp->qplib_qp;
1003  	sq = &qplqp->sq;
1004  	dev_attr = &rdev->dev_attr;
1005  
1006  	align = sizeof(struct sq_send_hdr);
1007  	ilsize = ALIGN(init_attr->cap.max_inline_data, align);
1008  
1009  	/* For gen P4 and gen P5 fixed-WQE compatibility mode,
1010  	 * the WQE size is fixed to 128 bytes, i.e. 6 SGEs.
1011  	 */
1012  	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) {
1013  		sq->wqe_size = bnxt_re_get_swqe_size(BNXT_STATIC_MAX_SGE);
1014  		sq->max_sge = BNXT_STATIC_MAX_SGE;
1015  	} else {
1016  		sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
1017  		if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
1018  			return -EINVAL;
1019  	}
1020  
1021  	if (init_attr->cap.max_inline_data) {
1022  		qplqp->max_inline_data = sq->wqe_size -
1023  			sizeof(struct sq_send_hdr);
1024  		init_attr->cap.max_inline_data = qplqp->max_inline_data;
1025  	}
1026  
1027  	return 0;
1028  }
1029  
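/* Pin and map the user-space SQ/RQ buffers for a userspace QP. For RC
 * QPs the SQ umem also covers the PSN search area placed after the WQEs.
 */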
1030  static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
1031  				struct bnxt_re_qp *qp, struct bnxt_re_ucontext *cntx,
1032  				struct bnxt_re_qp_req *ureq)
1033  {
1034  	struct bnxt_qplib_qp *qplib_qp;
1035  	int bytes = 0, psn_sz;
1036  	struct ib_umem *umem;
1037  	int psn_nume;
1038  
1039  	qplib_qp = &qp->qplib_qp;
1040  
1041  	bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
1042  	/* Consider mapping PSN search memory only for RC QPs. */
1043  	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
1044  		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
1045  						   sizeof(struct sq_psn_search_ext) :
1046  						   sizeof(struct sq_psn_search);
1047  		if (cntx && bnxt_re_is_var_size_supported(rdev, cntx)) {
1048  			psn_nume = ureq->sq_slots;
1049  		} else {
1050  			psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1051  			qplib_qp->sq.max_wqe : ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
1052  				 sizeof(struct bnxt_qplib_sge));
1053  		}
1054  		if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
1055  			psn_nume = roundup_pow_of_two(psn_nume);
1056  		bytes += (psn_nume * psn_sz);
1057  	}
1058  
1059  	bytes = PAGE_ALIGN(bytes);
1060  	umem = ib_umem_get(&rdev->ibdev, ureq->qpsva, bytes,
1061  			   IB_ACCESS_LOCAL_WRITE);
1062  	if (IS_ERR(umem))
1063  		return PTR_ERR(umem);
1064  
1065  	qp->sumem = umem;
1066  	qplib_qp->sq.sg_info.umem = umem;
1067  	qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
1068  	qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
1069  	qplib_qp->qp_handle = ureq->qp_handle;
1070  
1071  	if (!qp->qplib_qp.srq) {
1072  		bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
1073  		bytes = PAGE_ALIGN(bytes);
1074  		umem = ib_umem_get(&rdev->ibdev, ureq->qprva, bytes,
1075  				   IB_ACCESS_LOCAL_WRITE);
1076  		if (IS_ERR(umem))
1077  			goto rqfail;
1078  		qp->rumem = umem;
1079  		qplib_qp->rq.sg_info.umem = umem;
1080  		qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
1081  		qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
1082  	}
1083  
1084  	qplib_qp->dpi = &cntx->dpi;
1085  	return 0;
1086  rqfail:
1087  	ib_umem_release(qp->sumem);
1088  	qp->sumem = NULL;
1089  	memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
1090  
1091  	return PTR_ERR(umem);
1092  }
1093  
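/* Create the AH used by the shadow QP1. The DGID is taken from the
 * local SGID at index 0 and the DMAC mirrors the netdev's MAC address.
 */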
1094  static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
1095  				(struct bnxt_re_pd *pd,
1096  				 struct bnxt_qplib_res *qp1_res,
1097  				 struct bnxt_qplib_qp *qp1_qp)
1098  {
1099  	struct bnxt_re_dev *rdev = pd->rdev;
1100  	struct bnxt_re_ah *ah;
1101  	union ib_gid sgid;
1102  	int rc;
1103  
1104  	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
1105  	if (!ah)
1106  		return NULL;
1107  
1108  	ah->rdev = rdev;
1109  	ah->qplib_ah.pd = &pd->qplib_pd;
1110  
1111  	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
1112  	if (rc)
1113  		goto fail;
1114  
1115  	/* Supply the dgid with the same data as the sgid */
1116  	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
1117  	       sizeof(union ib_gid));
1118  	ah->qplib_ah.sgid_index = 0;
1119  
1120  	ah->qplib_ah.traffic_class = 0;
1121  	ah->qplib_ah.flow_label = 0;
1122  	ah->qplib_ah.hop_limit = 1;
1123  	ah->qplib_ah.sl = 0;
1124  	/* Have DMAC same as SMAC */
1125  	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
1126  
1127  	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
1128  	if (rc) {
1129  		ibdev_err(&rdev->ibdev,
1130  			  "Failed to allocate HW AH for Shadow QP");
1131  		goto fail;
1132  	}
1133  	atomic_inc(&rdev->stats.res.ah_count);
1134  
1135  	return ah;
1136  
1137  fail:
1138  	kfree(ah);
1139  	return NULL;
1140  }
1141  
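/* Create the shadow UD QP that handles QP1 traffic on legacy chips. Its
 * SQ depth mirrors the QP1 RQ depth and it shares QP1's CQs.
 */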
1142  static struct bnxt_re_qp *bnxt_re_create_shadow_qp
1143  				(struct bnxt_re_pd *pd,
1144  				 struct bnxt_qplib_res *qp1_res,
1145  				 struct bnxt_qplib_qp *qp1_qp)
1146  {
1147  	struct bnxt_re_dev *rdev = pd->rdev;
1148  	struct bnxt_re_qp *qp;
1149  	int rc;
1150  
1151  	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1152  	if (!qp)
1153  		return NULL;
1154  
1155  	qp->rdev = rdev;
1156  
1157  	/* Initialize the shadow QP structure from the QP1 values */
1158  	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1159  
1160  	qp->qplib_qp.pd = &pd->qplib_pd;
1161  	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1162  	qp->qplib_qp.type = IB_QPT_UD;
1163  
1164  	qp->qplib_qp.max_inline_data = 0;
1165  	qp->qplib_qp.sig_type = true;
1166  
1167  	/* Shadow QP SQ depth should be same as QP1 RQ depth */
1168  	qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
1169  	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1170  	qp->qplib_qp.sq.max_sw_wqe = qp1_qp->rq.max_wqe;
1171  	qp->qplib_qp.sq.max_sge = 2;
1172  	/* Q full delta can be 1 since it is internal QP */
1173  	qp->qplib_qp.sq.q_full_delta = 1;
1174  	qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
1175  	qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;
1176  
1177  	qp->qplib_qp.scq = qp1_qp->scq;
1178  	qp->qplib_qp.rcq = qp1_qp->rcq;
1179  
1180  	qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
1181  	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1182  	qp->qplib_qp.rq.max_sw_wqe = qp1_qp->rq.max_wqe;
1183  	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1184  	/* Q full delta can be 1 since it is internal QP */
1185  	qp->qplib_qp.rq.q_full_delta = 1;
1186  	qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
1187  	qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;
1188  
1189  	qp->qplib_qp.mtu = qp1_qp->mtu;
1190  
1191  	qp->qplib_qp.sq_hdr_buf_size = 0;
1192  	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1193  	qp->qplib_qp.dpi = &rdev->dpi_privileged;
1194  
1195  	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1196  	if (rc)
1197  		goto fail;
1198  
1199  	spin_lock_init(&qp->sq_lock);
1200  	INIT_LIST_HEAD(&qp->list);
1201  	mutex_lock(&rdev->qp_lock);
1202  	list_add_tail(&qp->list, &rdev->qp_list);
1203  	atomic_inc(&rdev->stats.res.qp_count);
1204  	mutex_unlock(&rdev->qp_lock);
1205  	return qp;
1206  fail:
1207  	kfree(qp);
1208  	return NULL;
1209  }
1210  
1211  static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
1212  				struct ib_qp_init_attr *init_attr,
1213  				struct bnxt_re_ucontext *uctx)
1214  {
1215  	struct bnxt_qplib_dev_attr *dev_attr;
1216  	struct bnxt_qplib_qp *qplqp;
1217  	struct bnxt_re_dev *rdev;
1218  	struct bnxt_qplib_q *rq;
1219  	int entries;
1220  
1221  	rdev = qp->rdev;
1222  	qplqp = &qp->qplib_qp;
1223  	rq = &qplqp->rq;
1224  	dev_attr = &rdev->dev_attr;
1225  
1226  	if (init_attr->srq) {
1227  		struct bnxt_re_srq *srq;
1228  
1229  		srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
1230  		qplqp->srq = &srq->qplib_srq;
1231  		rq->max_wqe = 0;
1232  	} else {
1233  		rq->max_sge = init_attr->cap.max_recv_sge;
1234  		if (rq->max_sge > dev_attr->max_qp_sges)
1235  			rq->max_sge = dev_attr->max_qp_sges;
1236  		init_attr->cap.max_recv_sge = rq->max_sge;
1237  		rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
1238  						       dev_attr->max_qp_sges);
1239  		/* Allocate 1 more than what's provided so posting max doesn't
1240  		 * mean empty.
1241  		 */
1242  		entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
1243  		rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1244  		rq->max_sw_wqe = rq->max_wqe;
1245  		rq->q_full_delta = 0;
1246  		rq->sg_info.pgsize = PAGE_SIZE;
1247  		rq->sg_info.pgshft = PAGE_SHIFT;
1248  	}
1249  
1250  	return 0;
1251  }
1252  
1253  static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
1254  {
1255  	struct bnxt_qplib_dev_attr *dev_attr;
1256  	struct bnxt_qplib_qp *qplqp;
1257  	struct bnxt_re_dev *rdev;
1258  
1259  	rdev = qp->rdev;
1260  	qplqp = &qp->qplib_qp;
1261  	dev_attr = &rdev->dev_attr;
1262  
1263  	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
1264  		qplqp->rq.max_sge = dev_attr->max_qp_sges;
1265  		if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
1266  			qplqp->rq.max_sge = dev_attr->max_qp_sges;
1267  		qplqp->rq.max_sge = 6;
1268  	}
1269  }
1270  
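/* Size the send queue. For user QPs in variable-WQE mode the depth is
 * taken from the user request in slots; otherwise the WQE size is derived
 * from the SGE/inline requirements and BNXT_QPLIB_RESERVED_QP_WRS extra
 * entries are reserved.
 */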
1271  static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
1272  				struct ib_qp_init_attr *init_attr,
1273  				struct bnxt_re_ucontext *uctx,
1274  				struct bnxt_re_qp_req *ureq)
1275  {
1276  	struct bnxt_qplib_dev_attr *dev_attr;
1277  	struct bnxt_qplib_qp *qplqp;
1278  	struct bnxt_re_dev *rdev;
1279  	struct bnxt_qplib_q *sq;
1280  	int diff = 0;
1281  	int entries;
1282  	int rc;
1283  
1284  	rdev = qp->rdev;
1285  	qplqp = &qp->qplib_qp;
1286  	sq = &qplqp->sq;
1287  	dev_attr = &rdev->dev_attr;
1288  
1289  	sq->max_sge = init_attr->cap.max_send_sge;
1290  	entries = init_attr->cap.max_send_wr;
1291  	if (uctx && qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) {
1292  		sq->max_wqe = ureq->sq_slots;
1293  		sq->max_sw_wqe = ureq->sq_slots;
1294  		sq->wqe_size = sizeof(struct sq_sge);
1295  	} else {
1296  		if (sq->max_sge > dev_attr->max_qp_sges) {
1297  			sq->max_sge = dev_attr->max_qp_sges;
1298  			init_attr->cap.max_send_sge = sq->max_sge;
1299  		}
1300  
1301  		rc = bnxt_re_setup_swqe_size(qp, init_attr);
1302  		if (rc)
1303  			return rc;
1304  
1305  		/* Allocate 128 + 1 more than what's provided */
1306  		diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
1307  			0 : BNXT_QPLIB_RESERVED_QP_WRS;
1308  		entries = bnxt_re_init_depth(entries + diff + 1, uctx);
1309  		sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
1310  		if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
1311  			sq->max_sw_wqe = bnxt_qplib_get_depth(sq, qplqp->wqe_mode, true);
1312  		else
1313  			sq->max_sw_wqe = sq->max_wqe;
1314  
1315  	}
1316  	sq->q_full_delta = diff + 1;
1317  	/*
1318  	 * Reserve one slot for the phantom WQE. The application can
1319  	 * post one extra entry in this case, but allow it to avoid an
1320  	 * unexpected queue-full condition.
1321  	 */
1322  	qplqp->sq.q_full_delta -= 1;
1323  	qplqp->sq.sg_info.pgsize = PAGE_SIZE;
1324  	qplqp->sq.sg_info.pgshft = PAGE_SHIFT;
1325  
1326  	return 0;
1327  }
1328  
1329  static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
1330  				       struct ib_qp_init_attr *init_attr,
1331  				       struct bnxt_re_ucontext *uctx)
1332  {
1333  	struct bnxt_qplib_dev_attr *dev_attr;
1334  	struct bnxt_qplib_qp *qplqp;
1335  	struct bnxt_re_dev *rdev;
1336  	int entries;
1337  
1338  	rdev = qp->rdev;
1339  	qplqp = &qp->qplib_qp;
1340  	dev_attr = &rdev->dev_attr;
1341  
1342  	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
1343  		entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
1344  		qplqp->sq.max_wqe = min_t(u32, entries,
1345  					  dev_attr->max_qp_wqes + 1);
1346  		qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
1347  			init_attr->cap.max_send_wr;
1348  		qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
1349  		if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
1350  			qplqp->sq.max_sge = dev_attr->max_qp_sges;
1351  	}
1352  }
1353  
1354  static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
1355  				struct ib_qp_init_attr *init_attr)
1356  {
1357  	struct bnxt_qplib_chip_ctx *chip_ctx;
1358  	int qptype;
1359  
1360  	chip_ctx = rdev->chip_ctx;
1361  
1362  	qptype = __from_ib_qp_type(init_attr->qp_type);
1363  	if (qptype == IB_QPT_MAX) {
1364  		ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
1365  		qptype = -EOPNOTSUPP;
1366  		goto out;
1367  	}
1368  
1369  	if (bnxt_qplib_is_chip_gen_p5_p7(chip_ctx) &&
1370  	    init_attr->qp_type == IB_QPT_GSI)
1371  		qptype = CMDQ_CREATE_QP_TYPE_GSI;
1372  out:
1373  	return qptype;
1374  }
1375  
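/* Populate the qplib QP structure from the IB create attributes: misc
 * parameters, CQs, RQ/SRQ, SQ and, for user QPs, the user-space mappings.
 */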
1376  static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1377  				struct ib_qp_init_attr *init_attr,
1378  				struct bnxt_re_ucontext *uctx,
1379  				struct bnxt_re_qp_req *ureq)
1380  {
1381  	struct bnxt_qplib_dev_attr *dev_attr;
1382  	struct bnxt_qplib_qp *qplqp;
1383  	struct bnxt_re_dev *rdev;
1384  	struct bnxt_re_cq *cq;
1385  	int rc = 0, qptype;
1386  
1387  	rdev = qp->rdev;
1388  	qplqp = &qp->qplib_qp;
1389  	dev_attr = &rdev->dev_attr;
1390  
1391  	/* Setup misc params */
1392  	ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
1393  	qplqp->pd = &pd->qplib_pd;
1394  	qplqp->qp_handle = (u64)qplqp;
1395  	qplqp->max_inline_data = init_attr->cap.max_inline_data;
1396  	qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1397  	qptype = bnxt_re_init_qp_type(rdev, init_attr);
1398  	if (qptype < 0) {
1399  		rc = qptype;
1400  		goto out;
1401  	}
1402  	qplqp->type = (u8)qptype;
1403  	qplqp->wqe_mode = bnxt_re_is_var_size_supported(rdev, uctx);
1404  	if (init_attr->qp_type == IB_QPT_RC) {
1405  		qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
1406  		qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1407  	}
1408  	qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1409  	qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
1410  	if (init_attr->create_flags) {
1411  		ibdev_dbg(&rdev->ibdev,
1412  			  "QP create flags 0x%x not supported",
1413  			  init_attr->create_flags);
1414  		return -EOPNOTSUPP;
1415  	}
1416  
1417  	/* Setup CQs */
1418  	if (init_attr->send_cq) {
1419  		cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
1420  		qplqp->scq = &cq->qplib_cq;
1421  		qp->scq = cq;
1422  	}
1423  
1424  	if (init_attr->recv_cq) {
1425  		cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
1426  		qplqp->rcq = &cq->qplib_cq;
1427  		qp->rcq = cq;
1428  	}
1429  
1430  	/* Setup RQ/SRQ */
1431  	rc = bnxt_re_init_rq_attr(qp, init_attr, uctx);
1432  	if (rc)
1433  		goto out;
1434  	if (init_attr->qp_type == IB_QPT_GSI)
1435  		bnxt_re_adjust_gsi_rq_attr(qp);
1436  
1437  	/* Setup SQ */
1438  	rc = bnxt_re_init_sq_attr(qp, init_attr, uctx, ureq);
1439  	if (rc)
1440  		goto out;
1441  	if (init_attr->qp_type == IB_QPT_GSI)
1442  		bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx);
1443  
1444  	if (uctx) /* This will update DPI and qp_handle */
1445  		rc = bnxt_re_init_user_qp(rdev, pd, qp, uctx, ureq);
1446  out:
1447  	return rc;
1448  }
1449  
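/* Set up the shadow GSI resources on legacy chips: the SQP table, the
 * shadow QP and the AH used to relay QP1 traffic.
 */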
1450  static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
1451  				     struct bnxt_re_pd *pd)
1452  {
1453  	struct bnxt_re_sqp_entries *sqp_tbl;
1454  	struct bnxt_re_dev *rdev;
1455  	struct bnxt_re_qp *sqp;
1456  	struct bnxt_re_ah *sah;
1457  	int rc = 0;
1458  
1459  	rdev = qp->rdev;
1460  	/* Create a shadow QP to handle the QP1 traffic */
1461  	sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
1462  			  GFP_KERNEL);
1463  	if (!sqp_tbl)
1464  		return -ENOMEM;
1465  	rdev->gsi_ctx.sqp_tbl = sqp_tbl;
1466  
1467  	sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
1468  	if (!sqp) {
1469  		rc = -ENODEV;
1470  		ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
1471  		goto out;
1472  	}
1473  	rdev->gsi_ctx.gsi_sqp = sqp;
1474  
1475  	sqp->rcq = qp->rcq;
1476  	sqp->scq = qp->scq;
1477  	sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1478  					  &qp->qplib_qp);
1479  	if (!sah) {
1480  		bnxt_qplib_destroy_qp(&rdev->qplib_res,
1481  				      &sqp->qplib_qp);
1482  		rc = -ENODEV;
1483  		ibdev_err(&rdev->ibdev,
1484  			  "Failed to create AH entry for ShadowQP");
1485  		goto out;
1486  	}
1487  	rdev->gsi_ctx.gsi_sah = sah;
1488  
1489  	return 0;
1490  out:
1491  	kfree(sqp_tbl);
1492  	return rc;
1493  }
1494  
1495  static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1496  				 struct ib_qp_init_attr *init_attr)
1497  {
1498  	struct bnxt_re_dev *rdev;
1499  	struct bnxt_qplib_qp *qplqp;
1500  	int rc;
1501  
1502  	rdev = qp->rdev;
1503  	qplqp = &qp->qplib_qp;
1504  
1505  	qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1506  	qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1507  
1508  	rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
1509  	if (rc) {
1510  		ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
1511  		goto out;
1512  	}
1513  
1514  	rc = bnxt_re_create_shadow_gsi(qp, pd);
1515  out:
1516  	return rc;
1517  }
1518  
1519  static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
1520  				   struct ib_qp_init_attr *init_attr,
1521  				   struct bnxt_qplib_dev_attr *dev_attr)
1522  {
1523  	bool rc = true;
1524  
1525  	if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
1526  	    init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
1527  	    init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
1528  	    init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
1529  	    init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
1530  		ibdev_err(&rdev->ibdev,
1531  			  "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
1532  			  init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
1533  			  init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
1534  			  init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
1535  			  init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
1536  			  init_attr->cap.max_inline_data,
1537  			  dev_attr->max_inline_data);
1538  		rc = false;
1539  	}
1540  	return rc;
1541  }
1542  
1543  int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
1544  		      struct ib_udata *udata)
1545  {
1546  	struct bnxt_qplib_dev_attr *dev_attr;
1547  	struct bnxt_re_ucontext *uctx;
1548  	struct bnxt_re_qp_req ureq;
1549  	struct bnxt_re_dev *rdev;
1550  	struct bnxt_re_pd *pd;
1551  	struct bnxt_re_qp *qp;
1552  	struct ib_pd *ib_pd;
1553  	u32 active_qps;
1554  	int rc;
1555  
1556  	ib_pd = ib_qp->pd;
1557  	pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1558  	rdev = pd->rdev;
1559  	dev_attr = &rdev->dev_attr;
1560  	qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1561  
1562  	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
1563  	if (udata)
1564  		if (ib_copy_from_udata(&ureq, udata,  min(udata->inlen, sizeof(ureq))))
1565  			return -EFAULT;
1566  
1567  	rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
1568  	if (!rc) {
1569  		rc = -EINVAL;
1570  		goto fail;
1571  	}
1572  
1573  	qp->rdev = rdev;
1574  	rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, uctx, &ureq);
1575  	if (rc)
1576  		goto fail;
1577  
1578  	if (qp_init_attr->qp_type == IB_QPT_GSI &&
1579  	    !(bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))) {
1580  		rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
1581  		if (rc == -ENODEV)
1582  			goto qp_destroy;
1583  		if (rc)
1584  			goto fail;
1585  	} else {
1586  		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1587  		if (rc) {
1588  			ibdev_err(&rdev->ibdev, "Failed to create HW QP");
1589  			goto free_umem;
1590  		}
1591  		if (udata) {
1592  			struct bnxt_re_qp_resp resp;
1593  
1594  			resp.qpid = qp->qplib_qp.id;
1595  			resp.rsvd = 0;
1596  			rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1597  			if (rc) {
1598  				ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
1599  				goto qp_destroy;
1600  			}
1601  		}
1602  	}
1603  
1604  	qp->ib_qp.qp_num = qp->qplib_qp.id;
1605  	if (qp_init_attr->qp_type == IB_QPT_GSI)
1606  		rdev->gsi_ctx.gsi_qp = qp;
1607  	spin_lock_init(&qp->sq_lock);
1608  	spin_lock_init(&qp->rq_lock);
1609  	INIT_LIST_HEAD(&qp->list);
1610  	mutex_lock(&rdev->qp_lock);
1611  	list_add_tail(&qp->list, &rdev->qp_list);
1612  	mutex_unlock(&rdev->qp_lock);
1613  	active_qps = atomic_inc_return(&rdev->stats.res.qp_count);
1614  	if (active_qps > rdev->stats.res.qp_watermark)
1615  		rdev->stats.res.qp_watermark = active_qps;
1616  	if (qp_init_attr->qp_type == IB_QPT_RC) {
1617  		active_qps = atomic_inc_return(&rdev->stats.res.rc_qp_count);
1618  		if (active_qps > rdev->stats.res.rc_qp_watermark)
1619  			rdev->stats.res.rc_qp_watermark = active_qps;
1620  	} else if (qp_init_attr->qp_type == IB_QPT_UD) {
1621  		active_qps = atomic_inc_return(&rdev->stats.res.ud_qp_count);
1622  		if (active_qps > rdev->stats.res.ud_qp_watermark)
1623  			rdev->stats.res.ud_qp_watermark = active_qps;
1624  	}
1625  
1626  	return 0;
1627  qp_destroy:
1628  	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1629  free_umem:
1630  	ib_umem_release(qp->rumem);
1631  	ib_umem_release(qp->sumem);
1632  fail:
1633  	return rc;
1634  }
1635  
1636  static u8 __from_ib_qp_state(enum ib_qp_state state)
1637  {
1638  	switch (state) {
1639  	case IB_QPS_RESET:
1640  		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1641  	case IB_QPS_INIT:
1642  		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1643  	case IB_QPS_RTR:
1644  		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1645  	case IB_QPS_RTS:
1646  		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1647  	case IB_QPS_SQD:
1648  		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1649  	case IB_QPS_SQE:
1650  		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1651  	case IB_QPS_ERR:
1652  	default:
1653  		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1654  	}
1655  }
1656  
1657  static enum ib_qp_state __to_ib_qp_state(u8 state)
1658  {
1659  	switch (state) {
1660  	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1661  		return IB_QPS_RESET;
1662  	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1663  		return IB_QPS_INIT;
1664  	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1665  		return IB_QPS_RTR;
1666  	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1667  		return IB_QPS_RTS;
1668  	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1669  		return IB_QPS_SQD;
1670  	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1671  		return IB_QPS_SQE;
1672  	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1673  	default:
1674  		return IB_QPS_ERR;
1675  	}
1676  }
1677  
1678  static u32 __from_ib_mtu(enum ib_mtu mtu)
1679  {
1680  	switch (mtu) {
1681  	case IB_MTU_256:
1682  		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1683  	case IB_MTU_512:
1684  		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1685  	case IB_MTU_1024:
1686  		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1687  	case IB_MTU_2048:
1688  		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1689  	case IB_MTU_4096:
1690  		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1691  	default:
1692  		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1693  	}
1694  }
1695  
1696  static enum ib_mtu __to_ib_mtu(u32 mtu)
1697  {
1698  	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1699  	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1700  		return IB_MTU_256;
1701  	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1702  		return IB_MTU_512;
1703  	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1704  		return IB_MTU_1024;
1705  	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1706  		return IB_MTU_2048;
1707  	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1708  		return IB_MTU_4096;
1709  	default:
1710  		return IB_MTU_2048;
1711  	}
1712  }
1713  
1714  /* Shared Receive Queues */
1715  int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
1716  {
1717  	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1718  					       ib_srq);
1719  	struct bnxt_re_dev *rdev = srq->rdev;
1720  	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1721  	struct bnxt_qplib_nq *nq = NULL;
1722  
1723  	if (qplib_srq->cq)
1724  		nq = qplib_srq->cq->nq;
1725  	if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
1726  		free_page((unsigned long)srq->uctx_srq_page);
1727  		hash_del(&srq->hash_entry);
1728  	}
1729  	bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1730  	ib_umem_release(srq->umem);
1731  	atomic_dec(&rdev->stats.res.srq_count);
1732  	if (nq)
1733  		nq->budget--;
1734  	return 0;
1735  }
1736  
1737  static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1738  				 struct bnxt_re_pd *pd,
1739  				 struct bnxt_re_srq *srq,
1740  				 struct ib_udata *udata)
1741  {
1742  	struct bnxt_re_srq_req ureq;
1743  	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1744  	struct ib_umem *umem;
1745  	int bytes = 0;
1746  	struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
1747  		udata, struct bnxt_re_ucontext, ib_uctx);
1748  
1749  	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1750  		return -EFAULT;
1751  
1752  	bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
1753  	bytes = PAGE_ALIGN(bytes);
1754  	umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
1755  			   IB_ACCESS_LOCAL_WRITE);
1756  	if (IS_ERR(umem))
1757  		return PTR_ERR(umem);
1758  
1759  	srq->umem = umem;
1760  	qplib_srq->sg_info.umem = umem;
1761  	qplib_srq->sg_info.pgsize = PAGE_SIZE;
1762  	qplib_srq->sg_info.pgshft = PAGE_SHIFT;
1763  	qplib_srq->srq_handle = ureq.srq_handle;
1764  	qplib_srq->dpi = &cntx->dpi;
1765  
1766  	return 0;
1767  }
1768  
1769  int bnxt_re_create_srq(struct ib_srq *ib_srq,
1770  		       struct ib_srq_init_attr *srq_init_attr,
1771  		       struct ib_udata *udata)
1772  {
1773  	struct bnxt_qplib_dev_attr *dev_attr;
1774  	struct bnxt_qplib_nq *nq = NULL;
1775  	struct bnxt_re_ucontext *uctx;
1776  	struct bnxt_re_dev *rdev;
1777  	struct bnxt_re_srq *srq;
1778  	struct bnxt_re_pd *pd;
1779  	struct ib_pd *ib_pd;
1780  	u32 active_srqs;
1781  	int rc, entries;
1782  
1783  	ib_pd = ib_srq->pd;
1784  	pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1785  	rdev = pd->rdev;
1786  	dev_attr = &rdev->dev_attr;
1787  	srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
1788  
1789  	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1790  		ibdev_err(&rdev->ibdev, "Create SRQ failed - max exceeded");
1791  		rc = -EINVAL;
1792  		goto exit;
1793  	}
1794  
1795  	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1796  		rc = -EOPNOTSUPP;
1797  		goto exit;
1798  	}
1799  
1800  	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
1801  	srq->rdev = rdev;
1802  	srq->qplib_srq.pd = &pd->qplib_pd;
1803  	srq->qplib_srq.dpi = &rdev->dpi_privileged;
1804  	/* Allocate 1 more than what's provided so that posting the max
1805  	 * number of WRs does not make the queue appear empty
1806  	 */
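	/*
	 * Illustrative example (numbers are hypothetical): a request for
	 * max_wr = 255 is sized from 256 entries below (before any rounding
	 * or clamping), so a consumer that posts all 255 WRs still leaves
	 * the ring distinguishable from the empty state.
	 */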
1807  	entries = bnxt_re_init_depth(srq_init_attr->attr.max_wr + 1, uctx);
1808  	if (entries > dev_attr->max_srq_wqes + 1)
1809  		entries = dev_attr->max_srq_wqes + 1;
1810  	srq->qplib_srq.max_wqe = entries;
1811  
1812  	srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1813  	 /* 128-byte WQE size for SRQ, so use the max SGEs */
1814  	srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
1815  	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1816  	srq->srq_limit = srq_init_attr->attr.srq_limit;
1817  	srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1818  	nq = &rdev->nq[0];
1819  
1820  	if (udata) {
1821  		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1822  		if (rc)
1823  			goto fail;
1824  	}
1825  
1826  	rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1827  	if (rc) {
1828  		ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
1829  		goto fail;
1830  	}
1831  
1832  	if (udata) {
1833  		struct bnxt_re_srq_resp resp = {};
1834  
1835  		resp.srqid = srq->qplib_srq.id;
1836  		if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
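			/*
			 * Descriptive note (assumed usage): the zeroed page
			 * allocated below is keyed by SRQ id in srq_hash and
			 * is expected to be mapped by user space later so the
			 * library can observe SRQ toggle-bit state; the
			 * comp_mask bit set below advertises that support.
			 */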
1837  			hash_add(rdev->srq_hash, &srq->hash_entry, srq->qplib_srq.id);
1838  			srq->uctx_srq_page = (void *)get_zeroed_page(GFP_KERNEL);
1839  			if (!srq->uctx_srq_page) {
1840  				rc = -ENOMEM;
1841  				goto fail;
1842  			}
1843  			resp.comp_mask |= BNXT_RE_SRQ_TOGGLE_PAGE_SUPPORT;
1844  		}
1845  		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1846  		if (rc) {
1847  			ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
1848  			bnxt_qplib_destroy_srq(&rdev->qplib_res,
1849  					       &srq->qplib_srq);
1850  			goto fail;
1851  		}
1852  	}
1853  	if (nq)
1854  		nq->budget++;
1855  	active_srqs = atomic_inc_return(&rdev->stats.res.srq_count);
1856  	if (active_srqs > rdev->stats.res.srq_watermark)
1857  		rdev->stats.res.srq_watermark = active_srqs;
1858  	spin_lock_init(&srq->lock);
1859  
1860  	return 0;
1861  
1862  fail:
1863  	ib_umem_release(srq->umem);
1864  exit:
1865  	return rc;
1866  }
1867  
1868  int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1869  		       enum ib_srq_attr_mask srq_attr_mask,
1870  		       struct ib_udata *udata)
1871  {
1872  	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1873  					       ib_srq);
1874  	struct bnxt_re_dev *rdev = srq->rdev;
1875  	int rc;
1876  
1877  	switch (srq_attr_mask) {
1878  	case IB_SRQ_MAX_WR:
1879  		/* SRQ resize is not supported */
1880  		return -EINVAL;
1881  	case IB_SRQ_LIMIT:
1882  		/* Change the SRQ threshold */
1883  		if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1884  			return -EINVAL;
1885  
1886  		srq->qplib_srq.threshold = srq_attr->srq_limit;
1887  		rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1888  		if (rc) {
1889  			ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
1890  			return rc;
1891  		}
1892  		/* On success, update the shadow */
1893  		srq->srq_limit = srq_attr->srq_limit;
1894  		/* No need to build and send a response back to udata */
1895  		return 0;
1896  	default:
1897  		ibdev_err(&rdev->ibdev,
1898  			  "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1899  		return -EINVAL;
1900  	}
1901  }
1902  
1903  int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1904  {
1905  	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1906  					       ib_srq);
1907  	struct bnxt_re_srq tsrq;
1908  	struct bnxt_re_dev *rdev = srq->rdev;
1909  	int rc;
1910  
1911  	/* Get live SRQ attr */
1912  	tsrq.qplib_srq.id = srq->qplib_srq.id;
1913  	rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1914  	if (rc) {
1915  		ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
1916  		return rc;
1917  	}
1918  	srq_attr->max_wr = srq->qplib_srq.max_wqe;
1919  	srq_attr->max_sge = srq->qplib_srq.max_sge;
1920  	srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1921  
1922  	return 0;
1923  }
1924  
1925  int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
1926  			  const struct ib_recv_wr **bad_wr)
1927  {
1928  	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1929  					       ib_srq);
1930  	struct bnxt_qplib_swqe wqe;
1931  	unsigned long flags;
1932  	int rc = 0;
1933  
1934  	spin_lock_irqsave(&srq->lock, flags);
1935  	while (wr) {
1936  		/* Transcribe each ib_recv_wr to qplib_swqe */
1937  		wqe.num_sge = wr->num_sge;
1938  		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
1939  		wqe.wr_id = wr->wr_id;
1940  		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1941  
1942  		rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1943  		if (rc) {
1944  			*bad_wr = wr;
1945  			break;
1946  		}
1947  		wr = wr->next;
1948  	}
1949  	spin_unlock_irqrestore(&srq->lock, flags);
1950  
1951  	return rc;
1952  }
1953  static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1954  				    struct bnxt_re_qp *qp1_qp,
1955  				    int qp_attr_mask)
1956  {
1957  	struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
1958  	int rc;
1959  
1960  	if (qp_attr_mask & IB_QP_STATE) {
1961  		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1962  		qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1963  	}
1964  	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1965  		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1966  		qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1967  	}
1968  
1969  	if (qp_attr_mask & IB_QP_QKEY) {
1970  		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1971  		/* Using a random QKEY */
1972  		qp->qplib_qp.qkey = 0x81818181;
1973  	}
1974  	if (qp_attr_mask & IB_QP_SQ_PSN) {
1975  		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1976  		qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1977  	}
1978  
1979  	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1980  	if (rc)
1981  		ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
1982  	return rc;
1983  }
1984  
1985  int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1986  		      int qp_attr_mask, struct ib_udata *udata)
1987  {
1988  	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1989  	struct bnxt_re_dev *rdev = qp->rdev;
1990  	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1991  	enum ib_qp_state curr_qp_state, new_qp_state;
1992  	int rc, entries;
1993  	unsigned int flags;
1994  	u8 nw_type;
1995  
1996  	if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1997  		return -EOPNOTSUPP;
1998  
1999  	qp->qplib_qp.modify_flags = 0;
2000  	if (qp_attr_mask & IB_QP_STATE) {
2001  		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
2002  		new_qp_state = qp_attr->qp_state;
2003  		if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
2004  					ib_qp->qp_type, qp_attr_mask)) {
2005  			ibdev_err(&rdev->ibdev,
2006  				  "Invalid attribute mask: %#x specified ",
2007  				  qp_attr_mask);
2008  			ibdev_err(&rdev->ibdev,
2009  				  "for qpn: %#x type: %#x",
2010  				  ib_qp->qp_num, ib_qp->qp_type);
2011  			ibdev_err(&rdev->ibdev,
2012  				  "curr_qp_state=0x%x, new_qp_state=0x%x\n",
2013  				  curr_qp_state, new_qp_state);
2014  			return -EINVAL;
2015  		}
2016  		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
2017  		qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
2018  
2019  		if (!qp->sumem &&
2020  		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2021  			ibdev_dbg(&rdev->ibdev,
2022  				  "Move QP = %p to flush list\n", qp);
2023  			flags = bnxt_re_lock_cqs(qp);
2024  			bnxt_qplib_add_flush_qp(&qp->qplib_qp);
2025  			bnxt_re_unlock_cqs(qp, flags);
2026  		}
2027  		if (!qp->sumem &&
2028  		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2029  			ibdev_dbg(&rdev->ibdev,
2030  				  "Move QP = %p out of flush list\n", qp);
2031  			flags = bnxt_re_lock_cqs(qp);
2032  			bnxt_qplib_clean_qp(&qp->qplib_qp);
2033  			bnxt_re_unlock_cqs(qp, flags);
2034  		}
2035  	}
2036  	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
2037  		qp->qplib_qp.modify_flags |=
2038  				CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
2039  		qp->qplib_qp.en_sqd_async_notify = true;
2040  	}
2041  	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
2042  		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
2043  		qp->qplib_qp.access =
2044  			__from_ib_access_flags(qp_attr->qp_access_flags);
2045  		/* LOCAL_WRITE access must be set to allow RC receive */
2046  		qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
2047  		/* Temp: Set all params on QP as of now */
2048  		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
2049  		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
2050  	}
2051  	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
2052  		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
2053  		qp->qplib_qp.pkey_index = qp_attr->pkey_index;
2054  	}
2055  	if (qp_attr_mask & IB_QP_QKEY) {
2056  		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
2057  		qp->qplib_qp.qkey = qp_attr->qkey;
2058  	}
2059  	if (qp_attr_mask & IB_QP_AV) {
2060  		const struct ib_global_route *grh =
2061  			rdma_ah_read_grh(&qp_attr->ah_attr);
2062  		const struct ib_gid_attr *sgid_attr;
2063  		struct bnxt_re_gid_ctx *ctx;
2064  
2065  		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
2066  				     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
2067  				     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
2068  				     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
2069  				     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
2070  				     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
2071  				     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
2072  		memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
2073  		       sizeof(qp->qplib_qp.ah.dgid.data));
2074  		qp->qplib_qp.ah.flow_label = grh->flow_label;
2075  		sgid_attr = grh->sgid_attr;
2076  		/* Get the HW context of the GID. The reference
2077  		 * to the GID table entry is already taken by the caller.
2078  		 */
2079  		ctx = rdma_read_gid_hw_context(sgid_attr);
2080  		qp->qplib_qp.ah.sgid_index = ctx->idx;
2081  		qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
2082  		qp->qplib_qp.ah.hop_limit = grh->hop_limit;
2083  		qp->qplib_qp.ah.traffic_class = grh->traffic_class;
2084  		qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
2085  		ether_addr_copy(qp->qplib_qp.ah.dmac,
2086  				qp_attr->ah_attr.roce.dmac);
2087  
2088  		rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
2089  					     &qp->qplib_qp.smac[0]);
2090  		if (rc)
2091  			return rc;
2092  
2093  		nw_type = rdma_gid_attr_network_type(sgid_attr);
2094  		switch (nw_type) {
2095  		case RDMA_NETWORK_IPV4:
2096  			qp->qplib_qp.nw_type =
2097  				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
2098  			break;
2099  		case RDMA_NETWORK_IPV6:
2100  			qp->qplib_qp.nw_type =
2101  				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
2102  			break;
2103  		default:
2104  			qp->qplib_qp.nw_type =
2105  				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
2106  			break;
2107  		}
2108  	}
2109  
2110  	if (qp_attr_mask & IB_QP_PATH_MTU) {
2111  		qp->qplib_qp.modify_flags |=
2112  				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
2113  		qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
2114  		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
2115  	} else if (qp_attr->qp_state == IB_QPS_RTR) {
2116  		qp->qplib_qp.modify_flags |=
2117  			CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
2118  		qp->qplib_qp.path_mtu =
2119  			__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
2120  		qp->qplib_qp.mtu =
2121  			ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
2122  	}
2123  
2124  	if (qp_attr_mask & IB_QP_TIMEOUT) {
2125  		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
2126  		qp->qplib_qp.timeout = qp_attr->timeout;
2127  	}
2128  	if (qp_attr_mask & IB_QP_RETRY_CNT) {
2129  		qp->qplib_qp.modify_flags |=
2130  				CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
2131  		qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
2132  	}
2133  	if (qp_attr_mask & IB_QP_RNR_RETRY) {
2134  		qp->qplib_qp.modify_flags |=
2135  				CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
2136  		qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
2137  	}
2138  	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
2139  		qp->qplib_qp.modify_flags |=
2140  				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
2141  		qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
2142  	}
2143  	if (qp_attr_mask & IB_QP_RQ_PSN) {
2144  		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
2145  		qp->qplib_qp.rq.psn = qp_attr->rq_psn;
2146  	}
2147  	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2148  		qp->qplib_qp.modify_flags |=
2149  				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
2150  		/* Cap the max_rd_atomic to device max */
2151  		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
2152  						   dev_attr->max_qp_rd_atom);
2153  	}
2154  	if (qp_attr_mask & IB_QP_SQ_PSN) {
2155  		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
2156  		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
2157  	}
2158  	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2159  		if (qp_attr->max_dest_rd_atomic >
2160  		    dev_attr->max_qp_init_rd_atom) {
2161  			ibdev_err(&rdev->ibdev,
2162  				  "max_dest_rd_atomic requested %d is > dev_max %d",
2163  				  qp_attr->max_dest_rd_atomic,
2164  				  dev_attr->max_qp_init_rd_atom);
2165  			return -EINVAL;
2166  		}
2167  
2168  		qp->qplib_qp.modify_flags |=
2169  				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
2170  		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
2171  	}
2172  	if (qp_attr_mask & IB_QP_CAP) {
2173  		struct bnxt_re_ucontext *uctx =
2174  			rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
2175  
2176  		qp->qplib_qp.modify_flags |=
2177  				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
2178  				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
2179  				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
2180  				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
2181  				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
2182  		if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
2183  		    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
2184  		    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
2185  		    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
2186  		    (qp_attr->cap.max_inline_data >=
2187  						dev_attr->max_inline_data)) {
2188  			ibdev_err(&rdev->ibdev,
2189  				  "Modify QP failed - max exceeded");
2190  			return -EINVAL;
2191  		}
2192  		entries = bnxt_re_init_depth(qp_attr->cap.max_send_wr, uctx);
2193  		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
2194  						dev_attr->max_qp_wqes + 1);
2195  		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
2196  						qp_attr->cap.max_send_wr;
2197  		/*
2198  		 * Reserve one slot for the phantom WQE. Some applications can
2199  		 * post one extra entry in this case; allow it to avoid an
2200  		 * unexpected queue-full condition.
2201  		 */
2202  		qp->qplib_qp.sq.q_full_delta -= 1;
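		/*
		 * Worked example (illustrative numbers only): if the SQ depth
		 * computed above is 128 for a requested max_send_wr of 100,
		 * q_full_delta becomes 128 - 100 - 1 = 27, one of those
		 * held-back slots covering the phantom WQE.
		 */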
2203  		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
2204  		if (qp->qplib_qp.rq.max_wqe) {
2205  			entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
2206  			qp->qplib_qp.rq.max_wqe =
2207  				min_t(u32, entries, dev_attr->max_qp_wqes + 1);
2208  			qp->qplib_qp.rq.max_sw_wqe = qp->qplib_qp.rq.max_wqe;
2209  			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
2210  						       qp_attr->cap.max_recv_wr;
2211  			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
2212  		} else {
2213  			/* SRQ was used prior, just ignore the RQ caps */
2214  		}
2215  	}
2216  	if (qp_attr_mask & IB_QP_DEST_QPN) {
2217  		qp->qplib_qp.modify_flags |=
2218  				CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
2219  		qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
2220  	}
2221  	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2222  	if (rc) {
2223  		ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
2224  		return rc;
2225  	}
2226  	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
2227  		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2228  	return rc;
2229  }
2230  
2231  int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2232  		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
2233  {
2234  	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2235  	struct bnxt_re_dev *rdev = qp->rdev;
2236  	struct bnxt_qplib_qp *qplib_qp;
2237  	int rc;
2238  
2239  	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
2240  	if (!qplib_qp)
2241  		return -ENOMEM;
2242  
2243  	qplib_qp->id = qp->qplib_qp.id;
2244  	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
2245  
2246  	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2247  	if (rc) {
2248  		ibdev_err(&rdev->ibdev, "Failed to query HW QP");
2249  		goto out;
2250  	}
2251  	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2252  	qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2253  	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2254  	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
2255  	qp_attr->pkey_index = qplib_qp->pkey_index;
2256  	qp_attr->qkey = qplib_qp->qkey;
2257  	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2258  	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
2259  			qplib_qp->ah.host_sgid_index,
2260  			qplib_qp->ah.hop_limit,
2261  			qplib_qp->ah.traffic_class);
2262  	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
2263  	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
2264  	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
2265  	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2266  	qp_attr->timeout = qplib_qp->timeout;
2267  	qp_attr->retry_cnt = qplib_qp->retry_cnt;
2268  	qp_attr->rnr_retry = qplib_qp->rnr_retry;
2269  	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2270  	qp_attr->rq_psn = qplib_qp->rq.psn;
2271  	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
2272  	qp_attr->sq_psn = qplib_qp->sq.psn;
2273  	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
2274  	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
2275  							 IB_SIGNAL_REQ_WR;
2276  	qp_attr->dest_qp_num = qplib_qp->dest_qpn;
2277  
2278  	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
2279  	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
2280  	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
2281  	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
2282  	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
2283  	qp_init_attr->cap = qp_attr->cap;
2284  
2285  out:
2286  	kfree(qplib_qp);
2287  	return rc;
2288  }
2289  
2290  /* Routine for sending QP1 packets for RoCE V1 and V2
2291   */
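/* Summary of the on-the-wire headers assembled below (derived from the
 * ib_ud_header_init() flags and the size notes later in this function;
 * the VLAN tag is present only when the GID carries a VLAN):
 *   RoCE V1      : ETH [+ VLAN] + GRH  + BTH + DETH
 *   RoCE V2 IPv6 : ETH [+ VLAN] + IPv6 + UDP + BTH + DETH
 *   RoCE V2 IPv4 : ETH [+ VLAN] + IPv4 + UDP + BTH + DETH
 */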
2292  static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
2293  				     const struct ib_send_wr *wr,
2294  				     struct bnxt_qplib_swqe *wqe,
2295  				     int payload_size)
2296  {
2297  	struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
2298  					     ib_ah);
2299  	struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
2300  	const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
2301  	struct bnxt_qplib_sge sge;
2302  	u8 nw_type;
2303  	u16 ether_type;
2304  	union ib_gid dgid;
2305  	bool is_eth = false;
2306  	bool is_vlan = false;
2307  	bool is_grh = false;
2308  	bool is_udp = false;
2309  	u8 ip_version = 0;
2310  	u16 vlan_id = 0xFFFF;
2311  	void *buf;
2312  	int i, rc;
2313  
2314  	memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
2315  
2316  	rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
2317  	if (rc)
2318  		return rc;
2319  
2320  	/* Get network header type for this GID */
2321  	nw_type = rdma_gid_attr_network_type(sgid_attr);
2322  	switch (nw_type) {
2323  	case RDMA_NETWORK_IPV4:
2324  		nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
2325  		break;
2326  	case RDMA_NETWORK_IPV6:
2327  		nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
2328  		break;
2329  	default:
2330  		nw_type = BNXT_RE_ROCE_V1_PACKET;
2331  		break;
2332  	}
2333  	memcpy(&dgid.raw, &qplib_ah->dgid, 16);
2334  	is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
2335  	if (is_udp) {
2336  		if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
2337  			ip_version = 4;
2338  			ether_type = ETH_P_IP;
2339  		} else {
2340  			ip_version = 6;
2341  			ether_type = ETH_P_IPV6;
2342  		}
2343  		is_grh = false;
2344  	} else {
2345  		ether_type = ETH_P_IBOE;
2346  		is_grh = true;
2347  	}
2348  
2349  	is_eth = true;
2350  	is_vlan = vlan_id && (vlan_id < 0x1000);
2351  
2352  	ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
2353  			  ip_version, is_udp, 0, &qp->qp1_hdr);
2354  
2355  	/* ETH */
2356  	ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
2357  	ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
2358  
2359  	/* For vlan, check the sgid for vlan existence */
2360  
2361  	if (!is_vlan) {
2362  		qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
2363  	} else {
2364  		qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
2365  		qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
2366  	}
2367  
2368  	if (is_grh || (ip_version == 6)) {
2369  		memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
2370  		       sizeof(sgid_attr->gid));
2371  		memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
2372  		       sizeof(sgid_attr->gid));
2373  		qp->qp1_hdr.grh.hop_limit     = qplib_ah->hop_limit;
2374  	}
2375  
2376  	if (ip_version == 4) {
2377  		qp->qp1_hdr.ip4.tos = 0;
2378  		qp->qp1_hdr.ip4.id = 0;
2379  		qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
2380  		qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
2381  
2382  		memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
2383  		memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
2384  		qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2385  	}
2386  
2387  	if (is_udp) {
2388  		qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
2389  		qp->qp1_hdr.udp.sport = htons(0x8CD1);
2390  		qp->qp1_hdr.udp.csum = 0;
2391  	}
2392  
2393  	/* BTH */
2394  	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2395  		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2396  		qp->qp1_hdr.immediate_present = 1;
2397  	} else {
2398  		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2399  	}
2400  	if (wr->send_flags & IB_SEND_SOLICITED)
2401  		qp->qp1_hdr.bth.solicited_event = 1;
2402  	/* pad_count */
2403  	qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2404  
2405  	/* P_key for QP1 is for all members */
2406  	qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2407  	qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2408  	qp->qp1_hdr.bth.ack_req = 0;
2409  	qp->send_psn++;
2410  	qp->send_psn &= BTH_PSN_MASK;
2411  	qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2412  	/* DETH */
2413  	/* Use the privileged Q_Key for QP1 */
2414  	qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2415  	qp->qp1_hdr.deth.source_qpn = IB_QP1;
2416  
2417  	/* Pack the QP1 to the transmit buffer */
2418  	buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2419  	if (buf) {
2420  		ib_ud_header_pack(&qp->qp1_hdr, buf);
2421  		for (i = wqe->num_sge; i; i--) {
2422  			wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2423  			wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2424  			wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2425  		}
2426  
2427  		/*
2428  		 * Max Header buf size for IPV6 RoCE V2 is 86,
2429  		 * which is same as the QP1 SQ header buffer.
2430  		 * Header buf size for IPV4 RoCE V2 can be 66.
2431  		 * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20).
2432  		 * Subtract 20 bytes from QP1 SQ header buf size
2433  		 */
2434  		if (is_udp && ip_version == 4)
2435  			sge.size -= 20;
2436  		/*
2437  		 * Max Header buf size for RoCE V1 is 78.
2438  		 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2439  		 * Subtract 8 bytes from QP1 SQ header buf size
2440  		 */
2441  		if (!is_udp)
2442  			sge.size -= 8;
2443  
2444  		/* Subtract 4 bytes for non-VLAN packets */
2445  		if (!is_vlan)
2446  			sge.size -= 4;
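		/*
		 * Worked example using the byte counts above: an untagged
		 * RoCE V2 IPv4 packet ends up with sge.size reduced by
		 * 20 + 4 = 24 bytes, i.e. 86 - 24 = 62 header bytes
		 * (ETH 14 + IPv4 20 + UDP 8 + BTH 20).
		 */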
2447  
2448  		wqe->sg_list[0].addr = sge.addr;
2449  		wqe->sg_list[0].lkey = sge.lkey;
2450  		wqe->sg_list[0].size = sge.size;
2451  		wqe->num_sge++;
2452  
2453  	} else {
2454  		ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
2455  		rc = -ENOMEM;
2456  	}
2457  	return rc;
2458  }
2459  
2460  /* The MAD layer provides only a recv SGE sized for the ib_grh plus the
2461   * MAD datagram; no Ethernet headers, Ethertype, BTH, DETH,
2462   * nor RoCE iCRC are included.  The Cu+ solution must provide a buffer
2463   * for the entire receive packet (334 bytes) with no VLAN and then copy
2464   * the GRH and the MAD datagram out to the provided SGE.
2465   */
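/* Sketch of how that split is handled below: SGE 0 of the posted WQE is
 * redirected to the driver's own QP1 RQ header buffer (sized
 * BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2), while the consumer's original SGE and
 * wr_id are parked in gsi_ctx.sqp_tbl[] keyed by the RQ producer index and
 * used later when the completion is reported.
 */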
2466  static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2467  					    const struct ib_recv_wr *wr,
2468  					    struct bnxt_qplib_swqe *wqe,
2469  					    int payload_size)
2470  {
2471  	struct bnxt_re_sqp_entries *sqp_entry;
2472  	struct bnxt_qplib_sge ref, sge;
2473  	struct bnxt_re_dev *rdev;
2474  	u32 rq_prod_index;
2475  
2476  	rdev = qp->rdev;
2477  
2478  	rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2479  
2480  	if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2481  		return -ENOMEM;
2482  
2483  	/* Create 1 SGE to receive the entire
2484  	 * ethernet packet
2485  	 */
2486  	/* Save the reference from ULP */
2487  	ref.addr = wqe->sg_list[0].addr;
2488  	ref.lkey = wqe->sg_list[0].lkey;
2489  	ref.size = wqe->sg_list[0].size;
2490  
2491  	sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
2492  
2493  	/* SGE 1 */
2494  	wqe->sg_list[0].addr = sge.addr;
2495  	wqe->sg_list[0].lkey = sge.lkey;
2496  	wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2497  	sge.size -= wqe->sg_list[0].size;
2498  
2499  	sqp_entry->sge.addr = ref.addr;
2500  	sqp_entry->sge.lkey = ref.lkey;
2501  	sqp_entry->sge.size = ref.size;
2502  	/* Store the wrid for reporting completion */
2503  	sqp_entry->wrid = wqe->wr_id;
2504  	/* Change the wqe->wr_id to the table index */
2505  	wqe->wr_id = rq_prod_index;
2506  	return 0;
2507  }
2508  
2509  static int is_ud_qp(struct bnxt_re_qp *qp)
2510  {
2511  	return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2512  		qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
2513  }
2514  
2515  static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2516  				  const struct ib_send_wr *wr,
2517  				  struct bnxt_qplib_swqe *wqe)
2518  {
2519  	struct bnxt_re_ah *ah = NULL;
2520  
2521  	if (is_ud_qp(qp)) {
2522  		ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2523  		wqe->send.q_key = ud_wr(wr)->remote_qkey;
2524  		wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2525  		wqe->send.avid = ah->qplib_ah.id;
2526  	}
2527  	switch (wr->opcode) {
2528  	case IB_WR_SEND:
2529  		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2530  		break;
2531  	case IB_WR_SEND_WITH_IMM:
2532  		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2533  		wqe->send.imm_data = be32_to_cpu(wr->ex.imm_data);
2534  		break;
2535  	case IB_WR_SEND_WITH_INV:
2536  		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2537  		wqe->send.inv_key = wr->ex.invalidate_rkey;
2538  		break;
2539  	default:
2540  		return -EINVAL;
2541  	}
2542  	if (wr->send_flags & IB_SEND_SIGNALED)
2543  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2544  	if (wr->send_flags & IB_SEND_FENCE)
2545  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2546  	if (wr->send_flags & IB_SEND_SOLICITED)
2547  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2548  	if (wr->send_flags & IB_SEND_INLINE)
2549  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2550  
2551  	return 0;
2552  }
2553  
2554  static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2555  				  struct bnxt_qplib_swqe *wqe)
2556  {
2557  	switch (wr->opcode) {
2558  	case IB_WR_RDMA_WRITE:
2559  		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2560  		break;
2561  	case IB_WR_RDMA_WRITE_WITH_IMM:
2562  		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2563  		wqe->rdma.imm_data = be32_to_cpu(wr->ex.imm_data);
2564  		break;
2565  	case IB_WR_RDMA_READ:
2566  		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2567  		wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2568  		break;
2569  	default:
2570  		return -EINVAL;
2571  	}
2572  	wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2573  	wqe->rdma.r_key = rdma_wr(wr)->rkey;
2574  	if (wr->send_flags & IB_SEND_SIGNALED)
2575  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2576  	if (wr->send_flags & IB_SEND_FENCE)
2577  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2578  	if (wr->send_flags & IB_SEND_SOLICITED)
2579  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2580  	if (wr->send_flags & IB_SEND_INLINE)
2581  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2582  
2583  	return 0;
2584  }
2585  
2586  static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2587  				    struct bnxt_qplib_swqe *wqe)
2588  {
2589  	switch (wr->opcode) {
2590  	case IB_WR_ATOMIC_CMP_AND_SWP:
2591  		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2592  		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2593  		wqe->atomic.swap_data = atomic_wr(wr)->swap;
2594  		break;
2595  	case IB_WR_ATOMIC_FETCH_AND_ADD:
2596  		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2597  		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2598  		break;
2599  	default:
2600  		return -EINVAL;
2601  	}
2602  	wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2603  	wqe->atomic.r_key = atomic_wr(wr)->rkey;
2604  	if (wr->send_flags & IB_SEND_SIGNALED)
2605  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2606  	if (wr->send_flags & IB_SEND_FENCE)
2607  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2608  	if (wr->send_flags & IB_SEND_SOLICITED)
2609  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2610  	return 0;
2611  }
2612  
2613  static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2614  				 struct bnxt_qplib_swqe *wqe)
2615  {
2616  	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2617  	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2618  
2619  	if (wr->send_flags & IB_SEND_SIGNALED)
2620  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2621  	if (wr->send_flags & IB_SEND_SOLICITED)
2622  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2623  
2624  	return 0;
2625  }
2626  
2627  static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2628  				 struct bnxt_qplib_swqe *wqe)
2629  {
2630  	struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2631  	struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2632  	int access = wr->access;
2633  
2634  	wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2635  	wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2636  	wqe->frmr.page_list = mr->pages;
2637  	wqe->frmr.page_list_len = mr->npages;
2638  	wqe->frmr.levels = qplib_frpl->hwq.level;
2639  	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2640  
2641  	if (wr->wr.send_flags & IB_SEND_SIGNALED)
2642  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2643  
2644  	if (access & IB_ACCESS_LOCAL_WRITE)
2645  		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2646  	if (access & IB_ACCESS_REMOTE_READ)
2647  		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2648  	if (access & IB_ACCESS_REMOTE_WRITE)
2649  		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2650  	if (access & IB_ACCESS_REMOTE_ATOMIC)
2651  		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2652  	if (access & IB_ACCESS_MW_BIND)
2653  		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2654  
2655  	wqe->frmr.l_key = wr->key;
2656  	wqe->frmr.length = wr->mr->length;
2657  	wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
2658  	wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
2659  	wqe->frmr.va = wr->mr->iova;
2660  	return 0;
2661  }
2662  
2663  static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2664  				    const struct ib_send_wr *wr,
2665  				    struct bnxt_qplib_swqe *wqe)
2666  {
2667  	/* Copy the inline data into the WQE's inline_data field */
2668  	u8 *in_data;
2669  	u32 i, sge_len;
2670  	void *sge_addr;
2671  
2672  	in_data = wqe->inline_data;
2673  	for (i = 0; i < wr->num_sge; i++) {
2674  		sge_addr = (void *)(unsigned long)
2675  				wr->sg_list[i].addr;
2676  		sge_len = wr->sg_list[i].length;
2677  
2678  		if ((sge_len + wqe->inline_len) >
2679  		    BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2680  			ibdev_err(&rdev->ibdev,
2681  				  "Inline data size requested > supported value");
2682  			return -EINVAL;
2683  		}
2685  
2686  		memcpy(in_data, sge_addr, sge_len);
2687  		in_data += wr->sg_list[i].length;
2688  		wqe->inline_len += wr->sg_list[i].length;
2689  	}
2690  	return wqe->inline_len;
2691  }
2692  
2693  static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2694  				   const struct ib_send_wr *wr,
2695  				   struct bnxt_qplib_swqe *wqe)
2696  {
2697  	int payload_sz = 0;
2698  
2699  	if (wr->send_flags & IB_SEND_INLINE)
2700  		payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2701  	else
2702  		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2703  					       wqe->num_sge);
2704  
2705  	return payload_sz;
2706  }
2707  
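/* Descriptive note: after every BNXT_RE_UD_QP_HW_STALL WQEs posted on a UD,
 * GSI or raw-Ethertype QP, the helper below re-issues a modify-QP to RTS and
 * resets the WQE counter as a workaround for a HW stall on those QP types.
 * It is called from the post_send paths right after the doorbell is rung.
 */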
2708  static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2709  {
2710  	if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2711  	     qp->ib_qp.qp_type == IB_QPT_GSI ||
2712  	     qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2713  	     qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2714  		int qp_attr_mask;
2715  		struct ib_qp_attr qp_attr;
2716  
2717  		qp_attr_mask = IB_QP_STATE;
2718  		qp_attr.qp_state = IB_QPS_RTS;
2719  		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2720  		qp->qplib_qp.wqe_cnt = 0;
2721  	}
2722  }
2723  
2724  static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2725  				       struct bnxt_re_qp *qp,
2726  				       const struct ib_send_wr *wr)
2727  {
2728  	int rc = 0, payload_sz = 0;
2729  	unsigned long flags;
2730  
2731  	spin_lock_irqsave(&qp->sq_lock, flags);
2732  	while (wr) {
2733  		struct bnxt_qplib_swqe wqe = {};
2734  
2735  		/* Common */
2736  		wqe.num_sge = wr->num_sge;
2737  		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2738  			ibdev_err(&rdev->ibdev,
2739  				  "Limit exceeded for Send SGEs");
2740  			rc = -EINVAL;
2741  			goto bad;
2742  		}
2743  
2744  		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2745  		if (payload_sz < 0) {
2746  			rc = -EINVAL;
2747  			goto bad;
2748  		}
2749  		wqe.wr_id = wr->wr_id;
2750  
2751  		wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2752  
2753  		rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2754  		if (!rc)
2755  			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2756  bad:
2757  		if (rc) {
2758  			ibdev_err(&rdev->ibdev,
2759  				  "Post send failed opcode = %#x rc = %d",
2760  				  wr->opcode, rc);
2761  			break;
2762  		}
2763  		wr = wr->next;
2764  	}
2765  	bnxt_qplib_post_send_db(&qp->qplib_qp);
2766  	bnxt_ud_qp_hw_stall_workaround(qp);
2767  	spin_unlock_irqrestore(&qp->sq_lock, flags);
2768  	return rc;
2769  }
2770  
2771  static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
2772  {
2773  	/* Need unconditional fence for non-wire memory opcode
2774  	 * to work as expected.
2775  	 */
2776  	if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
2777  	    wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
2778  	    wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
2779  	    wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
2780  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2781  }
2782  
2783  int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2784  		      const struct ib_send_wr **bad_wr)
2785  {
2786  	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2787  	struct bnxt_qplib_swqe wqe;
2788  	int rc = 0, payload_sz = 0;
2789  	unsigned long flags;
2790  
2791  	spin_lock_irqsave(&qp->sq_lock, flags);
2792  	while (wr) {
2793  		/* Housekeeping */
2794  		memset(&wqe, 0, sizeof(wqe));
2795  
2796  		/* Common */
2797  		wqe.num_sge = wr->num_sge;
2798  		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2799  			ibdev_err(&qp->rdev->ibdev,
2800  				  "Limit exceeded for Send SGEs");
2801  			rc = -EINVAL;
2802  			goto bad;
2803  		}
2804  
2805  		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2806  		if (payload_sz < 0) {
2807  			rc = -EINVAL;
2808  			goto bad;
2809  		}
2810  		wqe.wr_id = wr->wr_id;
2811  
2812  		switch (wr->opcode) {
2813  		case IB_WR_SEND:
2814  		case IB_WR_SEND_WITH_IMM:
2815  			if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
2816  				rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2817  							       payload_sz);
2818  				if (rc)
2819  					goto bad;
2820  				wqe.rawqp1.lflags |=
2821  					SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2822  			}
2823  			switch (wr->send_flags) {
2824  			case IB_SEND_IP_CSUM:
2825  				wqe.rawqp1.lflags |=
2826  					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2827  				break;
2828  			default:
2829  				break;
2830  			}
2831  			fallthrough;
2832  		case IB_WR_SEND_WITH_INV:
2833  			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2834  			break;
2835  		case IB_WR_RDMA_WRITE:
2836  		case IB_WR_RDMA_WRITE_WITH_IMM:
2837  		case IB_WR_RDMA_READ:
2838  			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2839  			break;
2840  		case IB_WR_ATOMIC_CMP_AND_SWP:
2841  		case IB_WR_ATOMIC_FETCH_AND_ADD:
2842  			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2843  			break;
2844  		case IB_WR_RDMA_READ_WITH_INV:
2845  			ibdev_err(&qp->rdev->ibdev,
2846  				  "RDMA Read with Invalidate is not supported");
2847  			rc = -EINVAL;
2848  			goto bad;
2849  		case IB_WR_LOCAL_INV:
2850  			rc = bnxt_re_build_inv_wqe(wr, &wqe);
2851  			break;
2852  		case IB_WR_REG_MR:
2853  			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2854  			break;
2855  		default:
2856  			/* Unsupported WRs */
2857  			ibdev_err(&qp->rdev->ibdev,
2858  				  "WR (%#x) is not supported", wr->opcode);
2859  			rc = -EINVAL;
2860  			goto bad;
2861  		}
2862  		if (!rc) {
2863  			if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
2864  				bnxt_re_legacy_set_uc_fence(&wqe);
2865  			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2866  		}
2867  bad:
2868  		if (rc) {
2869  			ibdev_err(&qp->rdev->ibdev,
2870  				  "post_send failed op:%#x qps = %#x rc = %d\n",
2871  				  wr->opcode, qp->qplib_qp.state, rc);
2872  			*bad_wr = wr;
2873  			break;
2874  		}
2875  		wr = wr->next;
2876  	}
2877  	bnxt_qplib_post_send_db(&qp->qplib_qp);
2878  	bnxt_ud_qp_hw_stall_workaround(qp);
2879  	spin_unlock_irqrestore(&qp->sq_lock, flags);
2880  
2881  	return rc;
2882  }
2883  
2884  static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2885  				       struct bnxt_re_qp *qp,
2886  				       const struct ib_recv_wr *wr)
2887  {
2888  	struct bnxt_qplib_swqe wqe;
2889  	int rc = 0;
2890  
2891  	while (wr) {
2892  		/* Housekeeping */
2893  		memset(&wqe, 0, sizeof(wqe));
2894  
2895  		/* Common */
2896  		wqe.num_sge = wr->num_sge;
2897  		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2898  			ibdev_err(&rdev->ibdev,
2899  				  "Limit exceeded for Receive SGEs");
2900  			rc = -EINVAL;
2901  			break;
2902  		}
2903  		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2904  		wqe.wr_id = wr->wr_id;
2905  		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2906  
2907  		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2908  		if (rc)
2909  			break;
2910  
2911  		wr = wr->next;
2912  	}
2913  	if (!rc)
2914  		bnxt_qplib_post_recv_db(&qp->qplib_qp);
2915  	return rc;
2916  }
2917  
2918  int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
2919  		      const struct ib_recv_wr **bad_wr)
2920  {
2921  	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2922  	struct bnxt_qplib_swqe wqe;
2923  	int rc = 0, payload_sz = 0;
2924  	unsigned long flags;
2925  	u32 count = 0;
2926  
2927  	spin_lock_irqsave(&qp->rq_lock, flags);
2928  	while (wr) {
2929  		/* Housekeeping */
2930  		memset(&wqe, 0, sizeof(wqe));
2931  
2932  		/* Common */
2933  		wqe.num_sge = wr->num_sge;
2934  		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2935  			ibdev_err(&qp->rdev->ibdev,
2936  				  "Limit exceeded for Receive SGEs");
2937  			rc = -EINVAL;
2938  			*bad_wr = wr;
2939  			break;
2940  		}
2941  
2942  		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2943  					       wr->num_sge);
2944  		wqe.wr_id = wr->wr_id;
2945  		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2946  
2947  		if (ib_qp->qp_type == IB_QPT_GSI &&
2948  		    qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
2949  			rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2950  							      payload_sz);
2951  		if (!rc)
2952  			rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2953  		if (rc) {
2954  			*bad_wr = wr;
2955  			break;
2956  		}
2957  
2958  		/* Ring the DB once the number of posted RQEs reaches the threshold */
2959  		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2960  			bnxt_qplib_post_recv_db(&qp->qplib_qp);
2961  			count = 0;
2962  		}
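		/*
		 * Note (rationale inferred from the threshold): batching the
		 * doorbell this way limits MMIO writes to roughly one per
		 * BNXT_RE_RQ_WQE_THRESHOLD receives, plus the final ring
		 * after the loop for any remainder.
		 */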
2963  
2964  		wr = wr->next;
2965  	}
2966  
2967  	if (count)
2968  		bnxt_qplib_post_recv_db(&qp->qplib_qp);
2969  
2970  	spin_unlock_irqrestore(&qp->rq_lock, flags);
2971  
2972  	return rc;
2973  }
2974  
2975  /* Completion Queues */
2976  int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
2977  {
2978  	struct bnxt_qplib_chip_ctx *cctx;
2979  	struct bnxt_qplib_nq *nq;
2980  	struct bnxt_re_dev *rdev;
2981  	struct bnxt_re_cq *cq;
2982  
2983  	cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2984  	rdev = cq->rdev;
2985  	nq = cq->qplib_cq.nq;
2986  	cctx = rdev->chip_ctx;
2987  
2988  	if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
2989  		free_page((unsigned long)cq->uctx_cq_page);
2990  		hash_del(&cq->hash_entry);
2991  	}
2992  	bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2993  	ib_umem_release(cq->umem);
2994  
2995  	atomic_dec(&rdev->stats.res.cq_count);
2996  	nq->budget--;
2997  	kfree(cq->cql);
2998  	return 0;
2999  }
3000  
3001  int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
3002  		      struct uverbs_attr_bundle *attrs)
3003  {
3004  	struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
3005  	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
3006  	struct ib_udata *udata = &attrs->driver_udata;
3007  	struct bnxt_re_ucontext *uctx =
3008  		rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
3009  	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3010  	struct bnxt_qplib_chip_ctx *cctx;
3011  	struct bnxt_qplib_nq *nq = NULL;
3012  	unsigned int nq_alloc_cnt;
3013  	int cqe = attr->cqe;
3014  	int rc, entries;
3015  	u32 active_cqs;
3016  
3017  	if (attr->flags)
3018  		return -EOPNOTSUPP;
3019  
3020  	/* Validate CQ fields */
3021  	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
3022  		ibdev_err(&rdev->ibdev, "Failed to create CQ - max exceeded");
3023  		return -EINVAL;
3024  	}
3025  
3026  	cq->rdev = rdev;
3027  	cctx = rdev->chip_ctx;
3028  	cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
3029  
3030  	entries = bnxt_re_init_depth(cqe + 1, uctx);
3031  	if (entries > dev_attr->max_cq_wqes + 1)
3032  		entries = dev_attr->max_cq_wqes + 1;
3033  
3034  	cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
3035  	cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
3036  	if (udata) {
3037  		struct bnxt_re_cq_req req;

3038  		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
3039  			rc = -EFAULT;
3040  			goto fail;
3041  		}
3042  
3043  		cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
3044  				       entries * sizeof(struct cq_base),
3045  				       IB_ACCESS_LOCAL_WRITE);
3046  		if (IS_ERR(cq->umem)) {
3047  			rc = PTR_ERR(cq->umem);
3048  			goto fail;
3049  		}
3050  		cq->qplib_cq.sg_info.umem = cq->umem;
3051  		cq->qplib_cq.dpi = &uctx->dpi;
3052  	} else {
3053  		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
3054  		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
3055  				  GFP_KERNEL);
3056  		if (!cq->cql) {
3057  			rc = -ENOMEM;
3058  			goto fail;
3059  		}
3060  
3061  		cq->qplib_cq.dpi = &rdev->dpi_privileged;
3062  	}
3063  	/*
3064  	 * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
3065  	 * used for getting the NQ index.
3066  	 */
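	/*
	 * Illustrative example (hypothetical MSI-X count): with num_msix = 9
	 * there are 8 NQs usable here, so successive CQs cycle through
	 * nq[0..7] as nq_alloc_cnt increments.
	 */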
3067  	nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
3068  	nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
3069  	cq->qplib_cq.max_wqe = entries;
3070  	cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
3071  	cq->qplib_cq.nq	= nq;
3072  
3073  	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
3074  	if (rc) {
3075  		ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
3076  		goto fail;
3077  	}
3078  
3079  	cq->ib_cq.cqe = entries;
3080  	cq->cq_period = cq->qplib_cq.period;
3081  	nq->budget++;
3082  
3083  	active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
3084  	if (active_cqs > rdev->stats.res.cq_watermark)
3085  		rdev->stats.res.cq_watermark = active_cqs;
3086  	spin_lock_init(&cq->cq_lock);
3087  
3088  	if (udata) {
3089  		struct bnxt_re_cq_resp resp = {};
3090  
3091  		if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
3092  			hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);
3093  			/* Allocate a page */
3094  			cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL);
3095  			if (!cq->uctx_cq_page) {
3096  				rc = -ENOMEM;
3097  				goto c2fail;
3098  			}
3099  			resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT;
3100  		}
3101  		resp.cqid = cq->qplib_cq.id;
3102  		resp.tail = cq->qplib_cq.hwq.cons;
3103  		resp.phase = cq->qplib_cq.period;
3104  		resp.rsvd = 0;
3105  		rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
3106  		if (rc) {
3107  			ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
3108  			bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
3109  			goto free_mem;
3110  		}
3111  	}
3112  
3113  	return 0;
3114  
3115  free_mem:
3116  	free_page((unsigned long)cq->uctx_cq_page);
3117  c2fail:
3118  	ib_umem_release(cq->umem);
3119  fail:
3120  	kfree(cq->cql);
3121  	return rc;
3122  }
3123  
3124  static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
3125  {
3126  	struct bnxt_re_dev *rdev = cq->rdev;
3127  
3128  	bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
3129  
3130  	cq->qplib_cq.max_wqe = cq->resize_cqe;
3131  	if (cq->resize_umem) {
3132  		ib_umem_release(cq->umem);
3133  		cq->umem = cq->resize_umem;
3134  		cq->resize_umem = NULL;
3135  		cq->resize_cqe = 0;
3136  	}
3137  }
3138  
3139  int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
3140  {
3141  	struct bnxt_qplib_sg_info sg_info = {};
3142  	struct bnxt_qplib_dpi *orig_dpi = NULL;
3143  	struct bnxt_qplib_dev_attr *dev_attr;
3144  	struct bnxt_re_ucontext *uctx = NULL;
3145  	struct bnxt_re_resize_cq_req req;
3146  	struct bnxt_re_dev *rdev;
3147  	struct bnxt_re_cq *cq;
3148  	int rc, entries;
3149  
3150  	cq =  container_of(ibcq, struct bnxt_re_cq, ib_cq);
3151  	rdev = cq->rdev;
3152  	dev_attr = &rdev->dev_attr;
3153  	if (!ibcq->uobject) {
3154  		ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
3155  		return -EOPNOTSUPP;
3156  	}
3157  
3158  	if (cq->resize_umem) {
3159  		ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy",
3160  			  cq->qplib_cq.id);
3161  		return -EBUSY;
3162  	}
3163  
3164  	/* Check that the requested CQ depth is within the supported range */
3165  	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
3166  		ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d",
3167  			  cq->qplib_cq.id, cqe);
3168  		return -EINVAL;
3169  	}
3170  
3171  	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
3172  	entries = bnxt_re_init_depth(cqe + 1, uctx);
3173  	if (entries > dev_attr->max_cq_wqes + 1)
3174  		entries = dev_attr->max_cq_wqes + 1;
3175  
3176  	/* uverbs consumer */
3177  	if (ib_copy_from_udata(&req, udata, sizeof(req))) {
3178  		rc = -EFAULT;
3179  		goto fail;
3180  	}
3181  
3182  	cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va,
3183  				      entries * sizeof(struct cq_base),
3184  				      IB_ACCESS_LOCAL_WRITE);
3185  	if (IS_ERR(cq->resize_umem)) {
3186  		rc = PTR_ERR(cq->resize_umem);
3187  		cq->resize_umem = NULL;
3188  		ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
3189  			  __func__, rc);
3190  		goto fail;
3191  	}
3192  	cq->resize_cqe = entries;
3193  	memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info));
3194  	orig_dpi = cq->qplib_cq.dpi;
3195  
3196  	cq->qplib_cq.sg_info.umem = cq->resize_umem;
3197  	cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
3198  	cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
3199  	cq->qplib_cq.dpi = &uctx->dpi;
3200  
3201  	rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
3202  	if (rc) {
3203  		ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!",
3204  			  cq->qplib_cq.id);
3205  		goto fail;
3206  	}
3207  
3208  	cq->ib_cq.cqe = cq->resize_cqe;
3209  	atomic_inc(&rdev->stats.res.resize_count);
3210  
3211  	return 0;
3212  
3213  fail:
3214  	if (cq->resize_umem) {
3215  		ib_umem_release(cq->resize_umem);
3216  		cq->resize_umem = NULL;
3217  		cq->resize_cqe = 0;
3218  		memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info));
3219  		cq->qplib_cq.dpi = orig_dpi;
3220  	}
3221  	return rc;
3222  }
3223  
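/* Map requester (send) CQE hardware status codes to IB work completion status. */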
3224  static u8 __req_to_ib_wc_status(u8 qstatus)
3225  {
3226  	switch (qstatus) {
3227  	case CQ_REQ_STATUS_OK:
3228  		return IB_WC_SUCCESS;
3229  	case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
3230  		return IB_WC_BAD_RESP_ERR;
3231  	case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
3232  		return IB_WC_LOC_LEN_ERR;
3233  	case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
3234  		return IB_WC_LOC_QP_OP_ERR;
3235  	case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
3236  		return IB_WC_LOC_PROT_ERR;
3237  	case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
3238  		return IB_WC_GENERAL_ERR;
3239  	case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
3240  		return IB_WC_REM_INV_REQ_ERR;
3241  	case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
3242  		return IB_WC_REM_ACCESS_ERR;
3243  	case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
3244  		return IB_WC_REM_OP_ERR;
3245  	case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
3246  		return IB_WC_RNR_RETRY_EXC_ERR;
3247  	case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
3248  		return IB_WC_RETRY_EXC_ERR;
3249  	case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
3250  		return IB_WC_WR_FLUSH_ERR;
3251  	default:
3252  		return IB_WC_GENERAL_ERR;
3253  	}
3255  }
3256  
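/* Map raw Ethernet QP1 receive status codes to IB work completion status. */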
3257  static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
3258  {
3259  	switch (qstatus) {
3260  	case CQ_RES_RAWETH_QP1_STATUS_OK:
3261  		return IB_WC_SUCCESS;
3262  	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
3263  		return IB_WC_LOC_ACCESS_ERR;
3264  	case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
3265  		return IB_WC_LOC_LEN_ERR;
3266  	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
3267  		return IB_WC_LOC_PROT_ERR;
3268  	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
3269  		return IB_WC_LOC_QP_OP_ERR;
3270  	case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
3271  		return IB_WC_GENERAL_ERR;
3272  	case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
3273  		return IB_WC_WR_FLUSH_ERR;
3274  	case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
3275  		return IB_WC_WR_FLUSH_ERR;
3276  	default:
3277  		return IB_WC_GENERAL_ERR;
3278  	}
3279  }
3280  
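/* Map RC responder (receive) status codes to IB work completion status. */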
3281  static u8 __rc_to_ib_wc_status(u8 qstatus)
3282  {
3283  	switch (qstatus) {
3284  	case CQ_RES_RC_STATUS_OK:
3285  		return IB_WC_SUCCESS;
3286  	case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
3287  		return IB_WC_LOC_ACCESS_ERR;
3288  	case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
3289  		return IB_WC_LOC_LEN_ERR;
3290  	case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
3291  		return IB_WC_LOC_PROT_ERR;
3292  	case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
3293  		return IB_WC_LOC_QP_OP_ERR;
3294  	case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
3295  		return IB_WC_GENERAL_ERR;
3296  	case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
3297  		return IB_WC_REM_INV_REQ_ERR;
3298  	case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
3299  		return IB_WC_WR_FLUSH_ERR;
3300  	case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
3301  		return IB_WC_WR_FLUSH_ERR;
3302  	default:
3303  		return IB_WC_GENERAL_ERR;
3304  	}
3305  }
3306  
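/* Fill the opcode and status of a send-side work completion from the qplib CQE. */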
3307  static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
3308  {
3309  	switch (cqe->type) {
3310  	case BNXT_QPLIB_SWQE_TYPE_SEND:
3311  		wc->opcode = IB_WC_SEND;
3312  		break;
3313  	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
3314  		wc->opcode = IB_WC_SEND;
3315  		wc->wc_flags |= IB_WC_WITH_IMM;
3316  		break;
3317  	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
3318  		wc->opcode = IB_WC_SEND;
3319  		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3320  		break;
3321  	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
3322  		wc->opcode = IB_WC_RDMA_WRITE;
3323  		break;
3324  	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
3325  		wc->opcode = IB_WC_RDMA_WRITE;
3326  		wc->wc_flags |= IB_WC_WITH_IMM;
3327  		break;
3328  	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
3329  		wc->opcode = IB_WC_RDMA_READ;
3330  		break;
3331  	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
3332  		wc->opcode = IB_WC_COMP_SWAP;
3333  		break;
3334  	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
3335  		wc->opcode = IB_WC_FETCH_ADD;
3336  		break;
3337  	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
3338  		wc->opcode = IB_WC_LOCAL_INV;
3339  		break;
3340  	case BNXT_QPLIB_SWQE_TYPE_REG_MR:
3341  		wc->opcode = IB_WC_REG_MR;
3342  		break;
3343  	default:
3344  		wc->opcode = IB_WC_SEND;
3345  		break;
3346  	}
3347  
3348  	wc->status = __req_to_ib_wc_status(cqe->status);
3349  }
3350  
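/* Classify a raw QP1 packet as RoCE v1, RoCE v2/IPv4 or RoCE v2/IPv6 based on
 * the raweth flags; returns a negative value for non-RoCE packets.
 */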
3351  static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
3352  				     u16 raweth_qp1_flags2)
3353  {
3354  	bool is_ipv6 = false;
3355  
3356  	/* raweth_qp1_flags bits 9-6 indicate the itype */
3357  	if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3358  	    != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3359  		return -1;
3360  
3361  	if (raweth_qp1_flags2 &
3362  	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
3363  	    raweth_qp1_flags2 &
3364  	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
3365  		/* raweth_qp1_flags2 bit 8 indicates the ip_type: 0 - v4, 1 - v6 */
3366  		if (raweth_qp1_flags2 &
3367  		    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE)
3368  			is_ipv6 = true;
3369  		return is_ipv6 ? BNXT_RE_ROCEV2_IPV6_PACKET :
3370  				 BNXT_RE_ROCEV2_IPV4_PACKET;
3372  	} else {
3373  		return BNXT_RE_ROCE_V1_PACKET;
3374  	}
3375  }
3376  
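/* Convert the driver packet classification to the IB network header type. */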
3377  static int bnxt_re_to_ib_nw_type(int nw_type)
3378  {
3379  	u8 nw_hdr_type = 0xFF;
3380  
3381  	switch (nw_type) {
3382  	case BNXT_RE_ROCE_V1_PACKET:
3383  		nw_hdr_type = RDMA_NETWORK_ROCE_V1;
3384  		break;
3385  	case BNXT_RE_ROCEV2_IPV4_PACKET:
3386  		nw_hdr_type = RDMA_NETWORK_IPV4;
3387  		break;
3388  	case BNXT_RE_ROCEV2_IPV6_PACKET:
3389  		nw_hdr_type = RDMA_NETWORK_IPV6;
3390  		break;
3391  	}
3392  	return nw_hdr_type;
3393  }
3394  
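/* Detect a QP1 loopback packet: when the destination MAC is not the local
 * interface MAC, look past the internal header and check whether the frame
 * carries RoCE v1 or RoCE v2 (UDP) traffic.
 */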
3395  static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
3396  				       void *rq_hdr_buf)
3397  {
3398  	u8 *tmp_buf = NULL;
3399  	struct ethhdr *eth_hdr;
3400  	u16 eth_type;
3401  	bool rc = false;
3402  
3403  	tmp_buf = (u8 *)rq_hdr_buf;
3404  	/*
3405  	 * If the dest mac is not the same as the I/F mac, this could be a
3406  	 * loopback or multicast address; check whether it is a
3407  	 * loopback packet.
3408  	 */
3409  	if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
3410  		tmp_buf += 4;
3411  		/* Check the ether type */
3412  		eth_hdr = (struct ethhdr *)tmp_buf;
3413  		eth_type = ntohs(eth_hdr->h_proto);
3414  		switch (eth_type) {
3415  		case ETH_P_IBOE:
3416  			rc = true;
3417  			break;
3418  		case ETH_P_IP:
3419  		case ETH_P_IPV6: {
3420  			u32 len;
3421  			struct udphdr *udp_hdr;
3422  
3423  			len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
3424  						      sizeof(struct ipv6hdr));
3425  			tmp_buf += sizeof(struct ethhdr) + len;
3426  			udp_hdr = (struct udphdr *)tmp_buf;
3427  			if (ntohs(udp_hdr->dest) == ROCE_V2_UDP_DPORT)
3429  				rc = true;
3430  			break;
3431  			}
3432  		default:
3433  			break;
3434  		}
3435  	}
3436  
3437  	return rc;
3438  }
3439  
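/* Relay a raw QP1 receive through the shadow GSI QP: store the original CQE,
 * post a receive buffer on the shadow QP and send the received payload to it
 * so that the completion is reported from the shadow QP.
 */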
3440  static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
3441  					 struct bnxt_qplib_cqe *cqe)
3442  {
3443  	struct bnxt_re_dev *rdev = gsi_qp->rdev;
3444  	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3445  	struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
3446  	dma_addr_t shrq_hdr_buf_map;
3447  	struct ib_sge s_sge[2] = {};
3448  	struct ib_sge r_sge[2] = {};
3449  	struct bnxt_re_ah *gsi_sah;
3450  	struct ib_recv_wr rwr = {};
3451  	dma_addr_t rq_hdr_buf_map;
3452  	struct ib_ud_wr udwr = {};
3453  	struct ib_send_wr *swr;
3454  	u32 skip_bytes = 0;
3455  	int pkt_type = 0;
3456  	void *rq_hdr_buf;
3457  	u32 offset = 0;
3458  	u32 tbl_idx;
3459  	int rc;
3460  
3461  	swr = &udwr.wr;
3462  	tbl_idx = cqe->wr_id;
3463  
3464  	rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
3465  			(tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
3466  	rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3467  							  tbl_idx);
3468  
3469  	/* Shadow QP header buffer */
3470  	shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3471  							    tbl_idx);
3472  	sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3473  
3474  	/* Store this cqe */
3475  	memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
3476  	sqp_entry->qp1_qp = gsi_qp;
3477  
3478  	/* Find packet type from the cqe */
3479  
3480  	pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
3481  					     cqe->raweth_qp1_flags2);
3482  	if (pkt_type < 0) {
3483  		ibdev_err(&rdev->ibdev, "Invalid packet\n");
3484  		return -EINVAL;
3485  	}
3486  
3487  	/* Adjust the offset for the user buffer and post in the rq */
3488  
3489  	if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
3490  		offset = 20;
3491  
3492  	/*
3493  	 * QP1 loopback packet has 4 bytes of internal header before
3494  	 * ether header. Skip these four bytes.
3495  	 */
3496  	if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
3497  		skip_bytes = 4;
3498  
3499  	/* First send SGE. Skip the ether header */
3500  	s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3501  			+ skip_bytes;
3502  	s_sge[0].lkey = 0xFFFFFFFF;
3503  	s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3504  				BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3505  
3506  	/* Second Send SGE */
3507  	s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3508  			BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3509  	if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3510  		s_sge[1].addr += 8;
3511  	s_sge[1].lkey = 0xFFFFFFFF;
3512  	s_sge[1].length = 256;
3513  
3514  	/* First recv SGE */
3515  
3516  	r_sge[0].addr = shrq_hdr_buf_map;
3517  	r_sge[0].lkey = 0xFFFFFFFF;
3518  	r_sge[0].length = 40;
3519  
3520  	r_sge[1].addr = sqp_entry->sge.addr + offset;
3521  	r_sge[1].lkey = sqp_entry->sge.lkey;
3522  	r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3523  
3524  	/* Create receive work request */
3525  	rwr.num_sge = 2;
3526  	rwr.sg_list = r_sge;
3527  	rwr.wr_id = tbl_idx;
3528  	rwr.next = NULL;
3529  
3530  	rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
3531  	if (rc) {
3532  		ibdev_err(&rdev->ibdev,
3533  			  "Failed to post Rx buffers to shadow QP");
3534  		return -ENOMEM;
3535  	}
3536  
3537  	swr->num_sge = 2;
3538  	swr->sg_list = s_sge;
3539  	swr->wr_id = tbl_idx;
3540  	swr->opcode = IB_WR_SEND;
3541  	swr->next = NULL;
3542  	gsi_sah = rdev->gsi_ctx.gsi_sah;
3543  	udwr.ah = &gsi_sah->ib_ah;
3544  	udwr.remote_qpn = gsi_sqp->qplib_qp.id;
3545  	udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
3546  
3547  	/* Post the received data on the send queue */
3548  	return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
3549  }
3550  
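/* Fill a work completion for a receive on the raw Ethernet QP1. */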
3551  static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3552  					  struct bnxt_qplib_cqe *cqe)
3553  {
3554  	wc->opcode = IB_WC_RECV;
3555  	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3556  	wc->wc_flags |= IB_WC_GRH;
3557  }
3558  
3559  static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
3560  					u16 vlan_id)
3561  {
3562  	/*
3563  	 * Check if the vlan is configured in the host. If not configured, it
3564  	 * can be a transparent VLAN, so don't report the vlan id.
3565  	 */
3566  	if (!__vlan_find_dev_deep_rcu(rdev->netdev,
3567  				      htons(ETH_P_8021Q), vlan_id))
3568  		return false;
3569  	return true;
3570  }
3571  
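/* Extract the VLAN id and priority from the raw QP1 CQE metadata; returns
 * true only for 802.1Q tagged packets.
 */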
3572  static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3573  				u16 *vid, u8 *sl)
3574  {
3575  	bool ret = false;
3576  	u32 metadata;
3577  	u16 tpid;
3578  
3579  	metadata = orig_cqe->raweth_qp1_metadata;
3580  	if (orig_cqe->raweth_qp1_flags2 &
3581  		CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3582  		tpid = ((metadata &
3583  			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3584  			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3585  		if (tpid == ETH_P_8021Q) {
3586  			*vid = metadata &
3587  			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3588  			*sl = (metadata &
3589  			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3590  			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3591  			ret = true;
3592  		}
3593  	}
3594  
3595  	return ret;
3596  }
3597  
3598  static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3599  				      struct bnxt_qplib_cqe *cqe)
3600  {
3601  	wc->opcode = IB_WC_RECV;
3602  	wc->status = __rc_to_ib_wc_status(cqe->status);
3603  
3604  	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3605  		wc->wc_flags |= IB_WC_WITH_IMM;
3606  	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3607  		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3608  	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3609  	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3610  		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3611  }
3612  
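/* Build the user-visible work completion for a packet received on the shadow
 * GSI QP, using the CQE stored when the packet first arrived on QP1.
 */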
3613  static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
3614  					     struct ib_wc *wc,
3615  					     struct bnxt_qplib_cqe *cqe)
3616  {
3617  	struct bnxt_re_dev *rdev = gsi_sqp->rdev;
3618  	struct bnxt_re_qp *gsi_qp = NULL;
3619  	struct bnxt_qplib_cqe *orig_cqe = NULL;
3620  	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3621  	int nw_type;
3622  	u32 tbl_idx;
3623  	u16 vlan_id;
3624  	u8 sl;
3625  
3626  	tbl_idx = cqe->wr_id;
3627  
3628  	sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3629  	gsi_qp = sqp_entry->qp1_qp;
3630  	orig_cqe = &sqp_entry->cqe;
3631  
3632  	wc->wr_id = sqp_entry->wrid;
3633  	wc->byte_len = orig_cqe->length;
3634  	wc->qp = &gsi_qp->ib_qp;
3635  
3636  	wc->ex.imm_data = cpu_to_be32(le32_to_cpu(orig_cqe->immdata));
3637  	wc->src_qp = orig_cqe->src_qp;
3638  	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3639  	if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3640  		if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3641  			wc->vlan_id = vlan_id;
3642  			wc->sl = sl;
3643  			wc->wc_flags |= IB_WC_WITH_VLAN;
3644  		}
3645  	}
3646  	wc->port_num = 1;
3647  	wc->vendor_err = orig_cqe->status;
3648  
3649  	wc->opcode = IB_WC_RECV;
3650  	wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3651  	wc->wc_flags |= IB_WC_GRH;
3652  
3653  	nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3654  					    orig_cqe->raweth_qp1_flags2);
3655  	if (nw_type >= 0) {
3656  		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3657  		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3658  	}
3659  }
3660  
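/* Fill a UD receive work completion; GRH, SMAC, VLAN and the network header
 * type are reported only for GSI QPs.
 */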
3661  static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3662  				      struct ib_wc *wc,
3663  				      struct bnxt_qplib_cqe *cqe)
3664  {
3665  	struct bnxt_re_dev *rdev;
3666  	u16 vlan_id = 0;
3667  	u8 nw_type;
3668  
3669  	rdev = qp->rdev;
3670  	wc->opcode = IB_WC_RECV;
3671  	wc->status = __rc_to_ib_wc_status(cqe->status);
3672  
3673  	if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3674  		wc->wc_flags |= IB_WC_WITH_IMM;
3675  	/* report only on GSI QP for Thor */
3676  	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3677  		wc->wc_flags |= IB_WC_GRH;
3678  		memcpy(wc->smac, cqe->smac, ETH_ALEN);
3679  		wc->wc_flags |= IB_WC_WITH_SMAC;
3680  		if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN)
3681  			vlan_id = (cqe->cfa_meta & 0xFFF);
3682  		/* Mark only if vlan_id is non-zero */
3684  		if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3685  			wc->vlan_id = vlan_id;
3686  			wc->wc_flags |= IB_WC_WITH_VLAN;
3687  		}
3688  		nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3689  			   CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3690  		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3691  		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3692  	}
3694  }
3695  
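/* Post a phantom WQE (a fence memory-window bind) on the SQ; called from the
 * poll path when the qplib layer requests one.
 */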
3696  static int send_phantom_wqe(struct bnxt_re_qp *qp)
3697  {
3698  	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3699  	unsigned long flags;
3700  	int rc;
3701  
3702  	spin_lock_irqsave(&qp->sq_lock, flags);
3703  
3704  	rc = bnxt_re_bind_fence_mw(lib_qp);
3705  	if (!rc) {
3706  		lib_qp->sq.phantom_wqe_cnt++;
3707  		ibdev_dbg(&qp->rdev->ibdev,
3708  			  "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3709  			  lib_qp->id, lib_qp->sq.hwq.prod,
3710  			  HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3711  			  lib_qp->sq.phantom_wqe_cnt);
3712  	}
3713  
3714  	spin_unlock_irqrestore(&qp->sq_lock, flags);
3715  	return rc;
3716  }
3717  
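/* Poll the CQ. For user CQs only a pending resize is completed; for kernel
 * CQs up to num_entries qplib CQEs (including flushed entries) are translated
 * into IB work completions, with special handling for the GSI/shadow QP.
 */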
3718  int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3719  {
3720  	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3721  	struct bnxt_re_qp *qp, *sh_qp;
3722  	struct bnxt_qplib_cqe *cqe;
3723  	int i, ncqe, budget;
3724  	struct bnxt_qplib_q *sq;
3725  	struct bnxt_qplib_qp *lib_qp;
3726  	u32 tbl_idx;
3727  	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3728  	unsigned long flags;
3729  
3730  	/* User CQ; the only processing we do is to
3731  	 * complete any pending CQ resize operation.
3732  	 */
3733  	if (cq->umem) {
3734  		if (cq->resize_umem)
3735  			bnxt_re_resize_cq_complete(cq);
3736  		return 0;
3737  	}
3738  
3739  	spin_lock_irqsave(&cq->cq_lock, flags);
3740  	budget = min_t(u32, num_entries, cq->max_cql);
3741  	num_entries = budget;
3742  	if (!cq->cql) {
3743  		ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
3744  		goto exit;
3745  	}
3746  	cqe = &cq->cql[0];
3747  	while (budget) {
3748  		lib_qp = NULL;
3749  		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3750  		if (lib_qp) {
3751  			sq = &lib_qp->sq;
3752  			if (sq->send_phantom) {
3753  				qp = container_of(lib_qp,
3754  						  struct bnxt_re_qp, qplib_qp);
3755  				if (send_phantom_wqe(qp) == -ENOMEM)
3756  					ibdev_err(&cq->rdev->ibdev,
3757  						  "Phantom failed! Scheduled to send again\n");
3758  				else
3759  					sq->send_phantom = false;
3760  			}
3761  		}
3762  		if (ncqe < budget)
3763  			ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3764  							      cqe + ncqe,
3765  							      budget - ncqe);
3766  
3767  		if (!ncqe)
3768  			break;
3769  
3770  		for (i = 0; i < ncqe; i++, cqe++) {
3771  			/* Transcribe each qplib_wqe back to ib_wc */
3772  			memset(wc, 0, sizeof(*wc));
3773  
3774  			wc->wr_id = cqe->wr_id;
3775  			wc->byte_len = cqe->length;
3776  			qp = container_of
3777  				((struct bnxt_qplib_qp *)
3778  				 (unsigned long)(cqe->qp_handle),
3779  				 struct bnxt_re_qp, qplib_qp);
3780  			wc->qp = &qp->ib_qp;
3781  			wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immdata));
3782  			wc->src_qp = cqe->src_qp;
3783  			memcpy(wc->smac, cqe->smac, ETH_ALEN);
3784  			wc->port_num = 1;
3785  			wc->vendor_err = cqe->status;
3786  
3787  			switch (cqe->opcode) {
3788  			case CQ_BASE_CQE_TYPE_REQ:
3789  				sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3790  				if (sh_qp &&
3791  				    qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3792  					/* Handle this completion with
3793  					 * the stored completion
3794  					 */
3795  					memset(wc, 0, sizeof(*wc));
3796  					continue;
3797  				}
3798  				bnxt_re_process_req_wc(wc, cqe);
3799  				break;
3800  			case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3801  				if (!cqe->status) {
3802  					int rc = 0;
3803  
3804  					rc = bnxt_re_process_raw_qp_pkt_rx
3805  								(qp, cqe);
3806  					if (!rc) {
3807  						memset(wc, 0, sizeof(*wc));
3808  						continue;
3809  					}
3810  					cqe->status = -1;
3811  				}
3812  				/* Errors need not be looped back.
3813  				 * But change the wr_id to the one
3814  				 * stored in the table
3815  				 */
3816  				tbl_idx = cqe->wr_id;
3817  				sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
3818  				wc->wr_id = sqp_entry->wrid;
3819  				bnxt_re_process_res_rawqp1_wc(wc, cqe);
3820  				break;
3821  			case CQ_BASE_CQE_TYPE_RES_RC:
3822  				bnxt_re_process_res_rc_wc(wc, cqe);
3823  				break;
3824  			case CQ_BASE_CQE_TYPE_RES_UD:
3825  				sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3826  				if (sh_qp &&
3827  				    qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3828  					/* Handle this completion with
3829  					 * the stored completion
3830  					 */
3831  					if (cqe->status) {
3832  						continue;
3833  					} else {
3834  						bnxt_re_process_res_shadow_qp_wc
3835  								(qp, wc, cqe);
3836  						break;
3837  					}
3838  				}
3839  				bnxt_re_process_res_ud_wc(qp, wc, cqe);
3840  				break;
3841  			default:
3842  				ibdev_err(&cq->rdev->ibdev,
3843  					  "POLL CQ : type 0x%x not handled",
3844  					  cqe->opcode);
3845  				continue;
3846  			}
3847  			wc++;
3848  			budget--;
3849  		}
3850  	}
3851  exit:
3852  	spin_unlock_irqrestore(&cq->cq_lock, flags);
3853  	return num_entries - budget;
3854  }
3855  
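/* Arm the CQ for the requested notification type. If IB_CQ_REPORT_MISSED_EVENTS
 * is set and completions are already pending, return 1 without re-arming.
 */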
3856  int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3857  			  enum ib_cq_notify_flags ib_cqn_flags)
3858  {
3859  	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3860  	int type = 0, rc = 0;
3861  	unsigned long flags;
3862  
3863  	spin_lock_irqsave(&cq->cq_lock, flags);
3864  	/* Trigger on the very next completion */
3865  	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3866  		type = DBC_DBC_TYPE_CQ_ARMALL;
3867  	/* Trigger on the next solicited completion */
3868  	else if (ib_cqn_flags & IB_CQ_SOLICITED)
3869  		type = DBC_DBC_TYPE_CQ_ARMSE;
3870  
3871  	/* Poll to see if there are missed events */
3872  	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3873  	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3874  		rc = 1;
3875  		goto exit;
3876  	}
3877  	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3878  
3879  exit:
3880  	spin_unlock_irqrestore(&cq->cq_lock, flags);
3881  	return rc;
3882  }
3883  
3884  /* Memory Regions */
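/* Register a DMA MR for kernel consumers, covering the whole address space
 * (address 0, unlimited length).
 */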
3885  struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3886  {
3887  	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3888  	struct bnxt_re_dev *rdev = pd->rdev;
3889  	struct bnxt_re_mr *mr;
3890  	u32 active_mrs;
3891  	int rc;
3892  
3893  	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3894  	if (!mr)
3895  		return ERR_PTR(-ENOMEM);
3896  
3897  	mr->rdev = rdev;
3898  	mr->qplib_mr.pd = &pd->qplib_pd;
3899  	mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
3900  	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3901  
3902  	if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
3903  		bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
3904  
3905  	/* Allocate and register 0 as the address */
3906  	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3907  	if (rc)
3908  		goto fail;
3909  
3910  	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3911  	mr->qplib_mr.total_size = -1; /* Infinite length */
3912  	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
3913  			       PAGE_SIZE);
3914  	if (rc)
3915  		goto fail_mr;
3916  
3917  	mr->ib_mr.lkey = mr->qplib_mr.lkey;
3918  	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3919  			       IB_ACCESS_REMOTE_ATOMIC))
3920  		mr->ib_mr.rkey = mr->ib_mr.lkey;
3921  	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
3922  	if (active_mrs > rdev->stats.res.mr_watermark)
3923  		rdev->stats.res.mr_watermark = active_mrs;
3924  
3925  	return &mr->ib_mr;
3926  
3927  fail_mr:
3928  	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3929  fail:
3930  	kfree(mr);
3931  	return ERR_PTR(rc);
3932  }
3933  
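/* Deregister an MR: free the HW MRW, any fast-register page list and release
 * the pinned user memory.
 */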
3934  int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3935  {
3936  	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3937  	struct bnxt_re_dev *rdev = mr->rdev;
3938  	int rc;
3939  
3940  	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3941  	if (rc) {
3942  		ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
3943  		return rc;
3944  	}
3945  
3946  	if (mr->pages) {
3947  		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3948  							&mr->qplib_frpl);
3949  		kfree(mr->pages);
3950  		mr->npages = 0;
3951  		mr->pages = NULL;
3952  	}
3953  	ib_umem_release(mr->ib_umem);
3954  
3955  	kfree(mr);
3956  	atomic_dec(&rdev->stats.res.mr_count);
3957  	return rc;
3958  }
3959  
3960  static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3961  {
3962  	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3963  
3964  	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3965  		return -ENOMEM;
3966  
3967  	mr->pages[mr->npages++] = addr;
3968  	return 0;
3969  }
3970  
3971  int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3972  		      unsigned int *sg_offset)
3973  {
3974  	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3975  
3976  	mr->npages = 0;
3977  	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3978  }
3979  
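/* Allocate a fast-register MR (IB_MR_TYPE_MEM_REG only) along with its HW
 * page list, limited to MAX_PBL_LVL_1_PGS pages.
 */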
3980  struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3981  			       u32 max_num_sg)
3982  {
3983  	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3984  	struct bnxt_re_dev *rdev = pd->rdev;
3985  	struct bnxt_re_mr *mr = NULL;
3986  	u32 active_mrs;
3987  	int rc;
3988  
3989  	if (type != IB_MR_TYPE_MEM_REG) {
3990  		ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
3991  		return ERR_PTR(-EINVAL);
3992  	}
3993  	if (max_num_sg > MAX_PBL_LVL_1_PGS)
3994  		return ERR_PTR(-EINVAL);
3995  
3996  	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3997  	if (!mr)
3998  		return ERR_PTR(-ENOMEM);
3999  
4000  	mr->rdev = rdev;
4001  	mr->qplib_mr.pd = &pd->qplib_pd;
4002  	mr->qplib_mr.access_flags = BNXT_QPLIB_FR_PMR;
4003  	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
4004  
4005  	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4006  	if (rc)
4007  		goto bail;
4008  
4009  	mr->ib_mr.lkey = mr->qplib_mr.lkey;
4010  	mr->ib_mr.rkey = mr->ib_mr.lkey;
4011  
4012  	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
4013  	if (!mr->pages) {
4014  		rc = -ENOMEM;
4015  		goto fail;
4016  	}
4017  	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
4018  						 &mr->qplib_frpl, max_num_sg);
4019  	if (rc) {
4020  		ibdev_err(&rdev->ibdev,
4021  			  "Failed to allocate HW FR page list");
4022  		goto fail_mr;
4023  	}
4024  
4025  	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
4026  	if (active_mrs > rdev->stats.res.mr_watermark)
4027  		rdev->stats.res.mr_watermark = active_mrs;
4028  	return &mr->ib_mr;
4029  
4030  fail_mr:
4031  	kfree(mr->pages);
4032  fail:
4033  	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4034  bail:
4035  	kfree(mr);
4036  	return ERR_PTR(rc);
4037  }
4038  
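/* Allocate a type 1 or type 2B memory window on the given PD. */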
4039  struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
4040  			       struct ib_udata *udata)
4041  {
4042  	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4043  	struct bnxt_re_dev *rdev = pd->rdev;
4044  	struct bnxt_re_mw *mw;
4045  	u32 active_mws;
4046  	int rc;
4047  
4048  	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
4049  	if (!mw)
4050  		return ERR_PTR(-ENOMEM);
4051  	mw->rdev = rdev;
4052  	mw->qplib_mw.pd = &pd->qplib_pd;
4053  
4054  	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
4055  			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
4056  			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
4057  	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
4058  	if (rc) {
4059  		ibdev_err(&rdev->ibdev, "Allocate MW failed!");
4060  		goto fail;
4061  	}
4062  	mw->ib_mw.rkey = mw->qplib_mw.rkey;
4063  
4064  	active_mws = atomic_inc_return(&rdev->stats.res.mw_count);
4065  	if (active_mws > rdev->stats.res.mw_watermark)
4066  		rdev->stats.res.mw_watermark = active_mws;
4067  	return &mw->ib_mw;
4068  
4069  fail:
4070  	kfree(mw);
4071  	return ERR_PTR(rc);
4072  }
4073  
4074  int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
4075  {
4076  	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
4077  	struct bnxt_re_dev *rdev = mw->rdev;
4078  	int rc;
4079  
4080  	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
4081  	if (rc) {
4082  		ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
4083  		return rc;
4084  	}
4085  
4086  	kfree(mw);
4087  	atomic_dec(&rdev->stats.res.mw_count);
4088  	return rc;
4089  }
4090  
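/* Common helper to register a user MR from an already pinned umem: validate
 * the size, pick the best supported page size, allocate the HW MRW when
 * needed and program the region.
 */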
4091  static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr,
4092  					   int mr_access_flags, struct ib_umem *umem)
4093  {
4094  	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4095  	struct bnxt_re_dev *rdev = pd->rdev;
4096  	unsigned long page_size;
4097  	struct bnxt_re_mr *mr;
4098  	int umem_pgs, rc;
4099  	u32 active_mrs;
4100  
4101  	if (length > BNXT_RE_MAX_MR_SIZE) {
4102  		ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
4103  			  length, BNXT_RE_MAX_MR_SIZE);
4104  		return ERR_PTR(-ENOMEM);
4105  	}
4106  
4107  	page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
4108  	if (!page_size) {
4109  		ibdev_err(&rdev->ibdev, "umem page size unsupported!");
4110  		return ERR_PTR(-EINVAL);
4111  	}
4112  
4113  	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
4114  	if (!mr)
4115  		return ERR_PTR(-ENOMEM);
4116  
4117  	mr->rdev = rdev;
4118  	mr->qplib_mr.pd = &pd->qplib_pd;
4119  	mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
4120  	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
4121  
4122  	if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
4123  		rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4124  		if (rc) {
4125  			ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
4126  			rc = -EIO;
4127  			goto free_mr;
4128  		}
4129  		/* The fixed portion of the rkey is the same as the lkey */
4130  		mr->ib_mr.rkey = mr->qplib_mr.rkey;
4131  	} else {
4132  		mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
4133  	}
4134  	mr->ib_umem = umem;
4135  	mr->qplib_mr.va = virt_addr;
4136  	mr->qplib_mr.total_size = length;
4137  
4138  	if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
4139  		bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
4140  
4141  	umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
4142  	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
4143  			       umem_pgs, page_size);
4144  	if (rc) {
4145  		ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
4146  		rc = -EIO;
4147  		goto free_mrw;
4148  	}
4149  
4150  	mr->ib_mr.lkey = mr->qplib_mr.lkey;
4151  	mr->ib_mr.rkey = mr->qplib_mr.lkey;
4152  	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
4153  	if (active_mrs > rdev->stats.res.mr_watermark)
4154  		rdev->stats.res.mr_watermark = active_mrs;
4155  
4156  	return &mr->ib_mr;
4157  
4158  free_mrw:
4159  	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4160  free_mr:
4161  	kfree(mr);
4162  	return ERR_PTR(rc);
4163  }
4164  
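/* Register a user MR: pin the user memory and hand it to the common
 * registration helper.
 */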
4165  struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
4166  				  u64 virt_addr, int mr_access_flags,
4167  				  struct ib_udata *udata)
4168  {
4169  	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4170  	struct bnxt_re_dev *rdev = pd->rdev;
4171  	struct ib_umem *umem;
4172  	struct ib_mr *ib_mr;
4173  
4174  	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
4175  	if (IS_ERR(umem))
4176  		return ERR_CAST(umem);
4177  
4178  	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
4179  	if (IS_ERR(ib_mr))
4180  		ib_umem_release(umem);
4181  	return ib_mr;
4182  }
4183  
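/* Register a user MR backed by a pinned dma-buf. */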
4184  struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
4185  					 u64 length, u64 virt_addr, int fd,
4186  					 int mr_access_flags,
4187  					 struct uverbs_attr_bundle *attrs)
4188  {
4189  	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4190  	struct bnxt_re_dev *rdev = pd->rdev;
4191  	struct ib_umem_dmabuf *umem_dmabuf;
4192  	struct ib_umem *umem;
4193  	struct ib_mr *ib_mr;
4194  
4195  	umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
4196  						fd, mr_access_flags);
4197  	if (IS_ERR(umem_dmabuf))
4198  		return ERR_CAST(umem_dmabuf);
4199  
4200  	umem = &umem_dmabuf->umem;
4201  
4202  	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
4203  	if (IS_ERR(ib_mr))
4204  		ib_umem_release(umem);
4205  	return ib_mr;
4206  }
4207  
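/* Allocate a user context: verify the ABI version, set up the shared page and
 * its mmap entry, and report device capabilities back to user space.
 */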
4208  int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
4209  {
4210  	struct ib_device *ibdev = ctx->device;
4211  	struct bnxt_re_ucontext *uctx =
4212  		container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
4213  	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
4214  	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
4215  	struct bnxt_re_user_mmap_entry *entry;
4216  	struct bnxt_re_uctx_resp resp = {};
4217  	struct bnxt_re_uctx_req ureq = {};
4218  	u32 chip_met_rev_num = 0;
4219  	int rc;
4220  
4221  	ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);
4222  
4223  	if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
4224  		ibdev_dbg(ibdev, "Requested ABI version is different from the device's %d",
4225  			  BNXT_RE_ABI_VERSION);
4226  		return -EPERM;
4227  	}
4228  
4229  	uctx->rdev = rdev;
4230  
4231  	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
4232  	if (!uctx->shpg) {
4233  		rc = -ENOMEM;
4234  		goto fail;
4235  	}
4236  	spin_lock_init(&uctx->sh_lock);
4237  
4238  	resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
4239  	chip_met_rev_num = rdev->chip_ctx->chip_num;
4240  	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
4241  			     BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
4242  	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
4243  			     BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
4244  	resp.chip_id0 = chip_met_rev_num;
4245  	/* Temp, use xa_alloc instead */
4246  	resp.dev_id = rdev->en_dev->pdev->devfn;
4247  	resp.max_qp = rdev->qplib_ctx.qpc_count;
4248  	resp.pg_size = PAGE_SIZE;
4249  	resp.cqe_sz = sizeof(struct cq_base);
4250  	resp.max_cqd = dev_attr->max_cq_wqes;
4251  
4252  	if (rdev->chip_ctx->modes.db_push)
4253  		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
4254  
4255  	entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
4256  	if (!entry) {
4257  		rc = -ENOMEM;
4258  		goto cfail;
4259  	}
4260  	uctx->shpage_mmap = &entry->rdma_entry;
4261  	if (rdev->pacing.dbr_pacing)
4262  		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;
4263  
4264  	if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
4265  		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_MSN_TABLE_ENABLED;
4266  
4267  	if (udata->inlen >= sizeof(ureq)) {
4268  		rc = ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq)));
4269  		if (rc)
4270  			goto cfail;
4271  		if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) {
4272  			resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
4273  			uctx->cmask |= BNXT_RE_UCNTX_CAP_POW2_DISABLED;
4274  		}
4275  		if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_VAR_WQE_SUPPORT) {
4276  			resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
4277  			resp.mode = rdev->chip_ctx->modes.wqe_mode;
4278  			if (resp.mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
4279  				uctx->cmask |= BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED;
4280  		}
4281  	}
4282  
4283  	rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
4284  	if (rc) {
4285  		ibdev_err(ibdev, "Failed to copy user context");
4286  		rc = -EFAULT;
4287  		goto cfail;
4288  	}
4289  
4290  	return 0;
4291  cfail:
4292  	free_page((unsigned long)uctx->shpg);
4293  	uctx->shpg = NULL;
4294  fail:
4295  	return rc;
4296  }
4297  
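/* Tear down a user context: remove the shared-page mmap entry and free the
 * shared page and the per-context DPI.
 */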
4298  void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
4299  {
4300  	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
4301  						   struct bnxt_re_ucontext,
4302  						   ib_uctx);
4303  
4304  	struct bnxt_re_dev *rdev = uctx->rdev;
4305  
4306  	rdma_user_mmap_entry_remove(uctx->shpage_mmap);
4307  	uctx->shpage_mmap = NULL;
4308  	if (uctx->shpg)
4309  		free_page((unsigned long)uctx->shpg);
4310  
4311  	if (uctx->dpi.dbr) {
4312  		/* The DPI was allocated when the application created its
4313  		 * first PD; free it and mark the context dpi as NULL
4314  		 */
4315  		bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi);
4316  		uctx->dpi.dbr = NULL;
4317  	}
4318  }
4319  
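/* Look up a user CQ by its HW CQ id in the per-device hash table. */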
4320  static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
4321  {
4322  	struct bnxt_re_cq *cq = NULL, *tmp_cq;
4323  
4324  	hash_for_each_possible(rdev->cq_hash, tmp_cq, hash_entry, cq_id) {
4325  		if (tmp_cq->qplib_cq.id == cq_id) {
4326  			cq = tmp_cq;
4327  			break;
4328  		}
4329  	}
4330  	return cq;
4331  }
4332  
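/* Look up an SRQ by its HW SRQ id in the per-device hash table. */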
4333  static struct bnxt_re_srq *bnxt_re_search_for_srq(struct bnxt_re_dev *rdev, u32 srq_id)
4334  {
4335  	struct bnxt_re_srq *srq = NULL, *tmp_srq;
4336  
4337  	hash_for_each_possible(rdev->srq_hash, tmp_srq, hash_entry, srq_id) {
4338  		if (tmp_srq->qplib_srq.id == srq_id) {
4339  			srq = tmp_srq;
4340  			break;
4341  		}
4342  	}
4343  	return srq;
4344  }
4345  
4346  /* Helper function to mmap the virtual memory from user app */
4347  int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
4348  {
4349  	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
4350  						   struct bnxt_re_ucontext,
4351  						   ib_uctx);
4352  	struct bnxt_re_user_mmap_entry *bnxt_entry;
4353  	struct rdma_user_mmap_entry *rdma_entry;
4354  	int ret = 0;
4355  	u64 pfn;
4356  
4357  	rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
4358  	if (!rdma_entry)
4359  		return -EINVAL;
4360  
4361  	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
4362  				  rdma_entry);
4363  
4364  	switch (bnxt_entry->mmap_flag) {
4365  	case BNXT_RE_MMAP_WC_DB:
4366  		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4367  		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4368  					pgprot_writecombine(vma->vm_page_prot),
4369  					rdma_entry);
4370  		break;
4371  	case BNXT_RE_MMAP_UC_DB:
4372  		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4373  		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4374  					pgprot_noncached(vma->vm_page_prot),
4375  				rdma_entry);
4376  		break;
4377  	case BNXT_RE_MMAP_SH_PAGE:
4378  		ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
4379  		break;
4380  	case BNXT_RE_MMAP_DBR_BAR:
4381  		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4382  		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4383  					pgprot_noncached(vma->vm_page_prot),
4384  					rdma_entry);
4385  		break;
4386  	case BNXT_RE_MMAP_DBR_PAGE:
4387  	case BNXT_RE_MMAP_TOGGLE_PAGE:
4388  		/* Driver doesn't expect write access for user space */
4389  		if (vma->vm_flags & VM_WRITE)
4390  			return -EFAULT;
4391  		ret = vm_insert_page(vma, vma->vm_start,
4392  				     virt_to_page((void *)bnxt_entry->mem_offset));
4393  		break;
4394  	default:
4395  		ret = -EINVAL;
4396  		break;
4397  	}
4398  
4399  	rdma_user_mmap_entry_put(rdma_entry);
4400  	return ret;
4401  }
4402  
4403  void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
4404  {
4405  	struct bnxt_re_user_mmap_entry *bnxt_entry;
4406  
4407  	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
4408  				  rdma_entry);
4409  
4410  	kfree(bnxt_entry);
4411  }
4412  
4413  static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs)
4414  {
4415  	struct bnxt_re_ucontext *uctx;
4416  
4417  	uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
4418  	bnxt_re_pacing_alert(uctx->rdev);
4419  	return 0;
4420  }
4421  
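/* Allocate a mappable driver page for user space (write-combine doorbell,
 * DBR BAR or DBR page) and return its mmap offset, length and DPI.
 */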
4422  static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
4423  {
4424  	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
4425  	enum bnxt_re_alloc_page_type alloc_type;
4426  	struct bnxt_re_user_mmap_entry *entry;
4427  	enum bnxt_re_mmap_flag mmap_flag;
4428  	struct bnxt_qplib_chip_ctx *cctx;
4429  	struct bnxt_re_ucontext *uctx;
4430  	struct bnxt_re_dev *rdev;
4431  	u64 mmap_offset;
4432  	u32 length;
4433  	u32 dpi;
4434  	u64 addr;
4435  	int err;
4436  
4437  	uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
4438  	if (IS_ERR(uctx))
4439  		return PTR_ERR(uctx);
4440  
4441  	err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE);
4442  	if (err)
4443  		return err;
4444  
4445  	rdev = uctx->rdev;
4446  	cctx = rdev->chip_ctx;
4447  
4448  	switch (alloc_type) {
4449  	case BNXT_RE_ALLOC_WC_PAGE:
4450  		if (cctx->modes.db_push)  {
4451  			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi,
4452  						 uctx, BNXT_QPLIB_DPI_TYPE_WC))
4453  				return -ENOMEM;
4454  			length = PAGE_SIZE;
4455  			dpi = uctx->wcdpi.dpi;
4456  			addr = (u64)uctx->wcdpi.umdbr;
4457  			mmap_flag = BNXT_RE_MMAP_WC_DB;
4458  		} else {
4459  			return -EINVAL;
4460  		}
4461  
4462  		break;
4463  	case BNXT_RE_ALLOC_DBR_BAR_PAGE:
4464  		length = PAGE_SIZE;
4465  		addr = (u64)rdev->pacing.dbr_bar_addr;
4466  		mmap_flag = BNXT_RE_MMAP_DBR_BAR;
4467  		break;
4468  
4469  	case BNXT_RE_ALLOC_DBR_PAGE:
4470  		length = PAGE_SIZE;
4471  		addr = (u64)rdev->pacing.dbr_page;
4472  		mmap_flag = BNXT_RE_MMAP_DBR_PAGE;
4473  		break;
4474  
4475  	default:
4476  		return -EOPNOTSUPP;
4477  	}
4478  
4479  	entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mmap_offset);
4480  	if (!entry)
4481  		return -ENOMEM;
4482  
4483  	uobj->object = entry;
4484  	uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
4485  	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
4486  			     &mmap_offset, sizeof(mmap_offset));
4487  	if (err)
4488  		return err;
4489  
4490  	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
4491  			     &length, sizeof(length));
4492  	if (err)
4493  		return err;
4494  
4495  	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
4496  			     &dpi, sizeof(dpi));
4497  	if (err)
4498  		return err;
4499  
4500  	return 0;
4501  }
4502  
4503  static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
4504  				  enum rdma_remove_reason why,
4505  			    struct uverbs_attr_bundle *attrs)
4506  {
4507  	struct  bnxt_re_user_mmap_entry *entry = uobject->object;
4508  	struct bnxt_re_ucontext *uctx = entry->uctx;
4509  
4510  	switch (entry->mmap_flag) {
4511  	case BNXT_RE_MMAP_WC_DB:
4512  		if (uctx && uctx->wcdpi.dbr) {
4513  			struct bnxt_re_dev *rdev = uctx->rdev;
4514  
4515  			bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi);
4516  			uctx->wcdpi.dbr = NULL;
4517  		}
4518  		break;
4519  	case BNXT_RE_MMAP_DBR_BAR:
4520  	case BNXT_RE_MMAP_DBR_PAGE:
4521  		break;
4522  	default:
4523  		goto exit;
4524  	}
4525  	rdma_user_mmap_entry_remove(&entry->rdma_entry);
4526  exit:
4527  	return 0;
4528  }
4529  
4530  DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE,
4531  			    UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE,
4532  					    BNXT_RE_OBJECT_ALLOC_PAGE,
4533  					    UVERBS_ACCESS_NEW,
4534  					    UA_MANDATORY),
4535  			    UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE,
4536  						 enum bnxt_re_alloc_page_type,
4537  						 UA_MANDATORY),
4538  			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
4539  						UVERBS_ATTR_TYPE(u64),
4540  						UA_MANDATORY),
4541  			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
4542  						UVERBS_ATTR_TYPE(u32),
4543  						UA_MANDATORY),
4544  			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI,
4545  						UVERBS_ATTR_TYPE(u32),
4546  						UA_MANDATORY));
4547  
4548  DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE,
4549  				    UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE,
4550  						    BNXT_RE_OBJECT_ALLOC_PAGE,
4551  						    UVERBS_ACCESS_DESTROY,
4552  						    UA_MANDATORY));
4553  
4554  DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
4555  			    UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup),
4556  			    &UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
4557  			    &UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));
4558  
4559  DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_NOTIFY_DRV);
4560  
4561  DECLARE_UVERBS_GLOBAL_METHODS(BNXT_RE_OBJECT_NOTIFY_DRV,
4562  			      &UVERBS_METHOD(BNXT_RE_METHOD_NOTIFY_DRV));
4563  
4564  /* Toggle MEM */
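/* Look up the CQ or SRQ identified by res_id and return an mmap entry for
 * its toggle page.
 */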
4565  static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bundle *attrs)
4566  {
4567  	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
4568  	enum bnxt_re_mmap_flag mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
4569  	enum bnxt_re_get_toggle_mem_type res_type;
4570  	struct bnxt_re_user_mmap_entry *entry;
4571  	struct bnxt_re_ucontext *uctx;
4572  	struct ib_ucontext *ib_uctx;
4573  	struct bnxt_re_dev *rdev;
4574  	struct bnxt_re_srq *srq;
4575  	u32 length = PAGE_SIZE;
4576  	struct bnxt_re_cq *cq;
4577  	u64 mem_offset;
4578  	u32 offset = 0;
4579  	u64 addr = 0;
4580  	u32 res_id;
4581  	int err;
4582  
4583  	ib_uctx = ib_uverbs_get_ucontext(attrs);
4584  	if (IS_ERR(ib_uctx))
4585  		return PTR_ERR(ib_uctx);
4586  
4587  	err = uverbs_get_const(&res_type, attrs, BNXT_RE_TOGGLE_MEM_TYPE);
4588  	if (err)
4589  		return err;
4590  
4591  	uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
4592  	rdev = uctx->rdev;
4593  	err = uverbs_copy_from(&res_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
4594  	if (err)
4595  		return err;
4596  
4597  	switch (res_type) {
4598  	case BNXT_RE_CQ_TOGGLE_MEM:
4599  		cq = bnxt_re_search_for_cq(rdev, res_id);
4600  		if (!cq)
4601  			return -EINVAL;
4602  
4603  		addr = (u64)cq->uctx_cq_page;
4604  		break;
4605  	case BNXT_RE_SRQ_TOGGLE_MEM:
4606  		srq = bnxt_re_search_for_srq(rdev, res_id);
4607  		if (!srq)
4608  			return -EINVAL;
4609  
4610  		addr = (u64)srq->uctx_srq_page;
4611  		break;
4612  
4613  	default:
4614  		return -EOPNOTSUPP;
4615  	}
4616  
4617  	entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mem_offset);
4618  	if (!entry)
4619  		return -ENOMEM;
4620  
4621  	uobj->object = entry;
4622  	uverbs_finalize_uobj_create(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
4623  	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
4624  			     &mem_offset, sizeof(mem_offset));
4625  	if (err)
4626  		return err;
4627  
4628  	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
4629  			     &length, sizeof(length));
4630  	if (err)
4631  		return err;
4632  
4633  	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
4634  			     &offset, sizeof(offset));
4635  	if (err)
4636  		return err;
4637  
4638  	return 0;
4639  }
4640  
4641  static int get_toggle_mem_obj_cleanup(struct ib_uobject *uobject,
4642  				      enum rdma_remove_reason why,
4643  				      struct uverbs_attr_bundle *attrs)
4644  {
4645  	struct  bnxt_re_user_mmap_entry *entry = uobject->object;
4646  
4647  	rdma_user_mmap_entry_remove(&entry->rdma_entry);
4648  	return 0;
4649  }
4650  
4651  DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM,
4652  			    UVERBS_ATTR_IDR(BNXT_RE_TOGGLE_MEM_HANDLE,
4653  					    BNXT_RE_OBJECT_GET_TOGGLE_MEM,
4654  					    UVERBS_ACCESS_NEW,
4655  					    UA_MANDATORY),
4656  			    UVERBS_ATTR_CONST_IN(BNXT_RE_TOGGLE_MEM_TYPE,
4657  						 enum bnxt_re_get_toggle_mem_type,
4658  						 UA_MANDATORY),
4659  			    UVERBS_ATTR_PTR_IN(BNXT_RE_TOGGLE_MEM_RES_ID,
4660  					       UVERBS_ATTR_TYPE(u32),
4661  					       UA_MANDATORY),
4662  			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
4663  						UVERBS_ATTR_TYPE(u64),
4664  						UA_MANDATORY),
4665  			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
4666  						UVERBS_ATTR_TYPE(u32),
4667  						UA_MANDATORY),
4668  			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
4669  						UVERBS_ATTR_TYPE(u32),
4670  						UA_MANDATORY));
4671  
4672  DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM,
4673  				    UVERBS_ATTR_IDR(BNXT_RE_RELEASE_TOGGLE_MEM_HANDLE,
4674  						    BNXT_RE_OBJECT_GET_TOGGLE_MEM,
4675  						    UVERBS_ACCESS_DESTROY,
4676  						    UA_MANDATORY));
4677  
4678  DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_GET_TOGGLE_MEM,
4679  			    UVERBS_TYPE_ALLOC_IDR(get_toggle_mem_obj_cleanup),
4680  			    &UVERBS_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM),
4681  			    &UVERBS_METHOD(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM));
4682  
4683  const struct uapi_definition bnxt_re_uapi_defs[] = {
4684  	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
4685  	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_NOTIFY_DRV),
4686  	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_GET_TOGGLE_MEM),
4687  	{}
4688  };
4689